gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import conf
import g
from grax.access_infer import Access_Infer
from grax.access_style import Access_Style
from grax.access_level import Access_Level
from grax.access_scope import Access_Scope
from gwis.exception.gwis_error import GWIS_Error
from gwis.exception.gwis_warning import GWIS_Warning
from item import grac_record
from item import item_base
from item import item_versioned
from util_ import misc
__all__ = ['One', 'Many']
log = g.log.getLogger('groupy_base')
class One(grac_record.One):
    """A single group record: a stack ID plus a human-readable group name.

    NOTE(review): this looks like an abstract base for group-ish item types
    (group_membership, group item access) -- confirm against subclasses.
    """

    # Abstract item-type metadata; concrete subclasses supply real values.
    item_type_id = None # Abstract
    item_type_table = None
    item_gwis_abbrev = None
    child_item_types = None

    # Attribute definition table consumed by the item framework.
    local_defns = [
        # py/psql name, deft, send?, pkey?, pytyp, reqv, abbrev
        # These are technically required for gm and gia, but we don't check
        # until from_gml_group_id is called.
        ('group_id', None, True, True, int, 0, 'gpid',),
        ('group_name', None, True, None, str, 0,),
    ]
    attr_defns = grac_record.One.attr_defns + local_defns
    psql_defns = grac_record.One.psql_defns + local_defns
    gwis_defns = item_base.One.attr_defns_reduce_for_gwis(attr_defns)

    # One slot per locally-defined attribute (group_id, group_name).
    __slots__ = [] + [attr_defn[0] for attr_defn in local_defns]

    # *** Constructor

    def __init__(self, qb=None, row=None, req=None, copy_from=None):
        g.assurt(copy_from is None) # Not supported for this class.
        grac_record.One.__init__(self, qb, row, req, copy_from)

    # ***

    #
    def from_gml_group_id(self, qb, required=True):
        # Resolve self.group_id from whichever of group_id / group_name the
        # client supplied; see Many.from_gml_group_id_ for the rules.
        self.group_id = Many.from_gml_group_id_(qb.db,
                                                self.group_id,
                                                self.group_name,
                                                required)
        g.assurt((not required) or (self.group_id > 0))

    #
    def save_core(self, qb):
        # Avoid not-null constraints in item_stack by setting unused attrs.
        self.access_style_id = Access_Style.all_denied
        # NOTE: deliberately skips grac_record.One.save_core and calls the
        # grandparent implementation directly.
        item_versioned.One.save_core(self, qb)

    #
    def save_core_pre_save_get_acs(self, qb):
        # This is redundant since we set this is save_core. That is, this fcn. is
        # never called. But we still want to override the base class, to be
        # thorough.
        return Access_Style.all_denied

    # ***
# ***
class Many(grac_record.Many):
    """Collection of group records, plus cached lookups of the well-known
    group stack IDs (public, session, stealth) and name-to-ID resolution."""

    one_class = One

    __slots__ = ()

    # Class-wide caches of the well-known group stack IDs, resolved lazily on
    # first use and shared by every instance (the IDs never change at runtime).
    public_group_id_ = None
    session_group_id_ = None
    stealth_group_id_ = None

    # *** Constructor

    def __init__(self):
        grac_record.Many.__init__(self)

    # *** Public interface

    # Get the public group ID.
    # EXPLAIN: How is this different than the private _user_anon_instance group?
    @staticmethod
    def public_group_id(db):
        """Return (and cache) the stack ID of the public group."""
        if (Many.public_group_id_ is None):
            # Get the group ID of the public group.
            Many.public_group_id_ = int(db.sql(
                "SELECT cp_group_public_id() AS grp_id")[0]['grp_id'])
            #log.debug('Many.public_group_id_ = %d' % (Many.public_group_id_,))
            g.assurt(Many.public_group_id_ > 0)
        return Many.public_group_id_

    #
    @staticmethod
    def session_group_id(db):
        """Return (and cache) the stack ID of the session group."""
        if (Many.session_group_id_ is None):
            # Get the group ID of the session group.
            # Temporarily force fetchall so db.sql returns rows.
            was_dont_fetchall = db.dont_fetchall
            db.dont_fetchall = False
            rows = db.sql("SELECT cp_group_session_id() AS grp_id")
            g.assurt(len(rows) == 1)
            Many.session_group_id_ = int(rows[0]['grp_id'])
            #log.debug('Many.session_group_id_ = %d' % (Many.session_group_id_,))
            g.assurt(Many.session_group_id_ > 0)
            db.dont_fetchall = was_dont_fetchall
        return Many.session_group_id_

    #
    # This gets the stack ID of the 'Stealth-Secret Group'.
    @staticmethod
    def stealth_group_id(db):
        """Return (and cache) the stack ID of the stealth group."""
        if (Many.stealth_group_id_ is None):
            # Get the group ID of the stealth group.
            # Temporarily force fetchall so db.sql returns rows.
            was_dont_fetchall = db.dont_fetchall
            db.dont_fetchall = False
            rows = db.sql("SELECT cp_group_stealth_id() AS grp_id")
            g.assurt(len(rows) == 1)
            Many.stealth_group_id_ = int(rows[0]['grp_id'])
            #log.debug('Many.stealth_group_id_ = %d' % (Many.stealth_group_id_,))
            g.assurt(Many.stealth_group_id_ > 0)
            db.dont_fetchall = was_dont_fetchall
        return Many.stealth_group_id_

    #
    @staticmethod
    def cp_group_private_id(db, username):
        """Return the stack ID of username's private group, or None if the
        user has no (non-denied, current, undeleted) private-group membership."""
        rows = db.sql(
            """
            SELECT
               grp.stack_id AS group_id
            FROM
               user_ AS usr
            JOIN
               group_membership AS gmp
               ON gmp.user_id = usr.id
            JOIN
               group_ AS grp
               ON grp.stack_id = gmp.group_id
            WHERE
               usr.username = %s
               AND grp.access_scope_id = %s
               AND gmp.access_level_id < %s
               AND gmp.valid_until_rid = %s
               AND gmp.deleted IS FALSE
            """, (username,
                  Access_Scope.private,
                  Access_Level.denied,
                  conf.rid_inf,))
        if rows:
            # A user has at most one private group.
            g.assurt(len(rows) == 1)
            group_id = int(rows[0]['group_id'])
            g.assurt(group_id > 0)
        else:
            group_id = None
        return group_id

    #
    @staticmethod
    def cp_group_shared_id(db, group_name):
        """Return the stack ID of the shared group named group_name, or None."""
        rows = db.sql(
            """
            SELECT
               grp.stack_id AS group_id
            FROM
               group_ AS grp
            WHERE
               grp.name = %s
               AND grp.access_scope_id = %s
               AND grp.valid_until_rid = %s
               AND grp.deleted IS FALSE
            """, (group_name,
                  Access_Scope.shared,
                  conf.rid_inf,))
        if rows:
            if len(rows) != 1:
                # Shared group names should be unique; complain loudly if not.
                log.error('cp_group_shared_id: found %d rows for "%s"'
                          % (len(rows), group_name,))
                g.assurt(False)
            group_id = int(rows[0]['group_id'])
            g.assurt(group_id > 0)
        else:
            group_id = None
        return group_id

    #
    @staticmethod
    def from_gml_group_id_(db, grp_id, grp_nm, required=True):
        """Resolve a group stack ID from either an explicit ID or a name.

        The client may supply at most one of grp_id / grp_nm. Raises
        GWIS_Error when both are given, or when neither is given and
        required is True. Returns None when neither is given and required
        is False.
        """
        group_id = None
        if bool(grp_id and grp_nm):
            raise GWIS_Error(
                'Attr. confusions: Please specify just "group_id" or "group_name"')
        elif (not grp_id) and (not grp_nm):
            if required:
                raise GWIS_Error(
                    'Missing mandatory attr: "group_id" or "group_name"')
        elif not grp_id:
            # Only the name was given; resolve it (raises GWIS_Warning when
            # the name cannot be resolved, so group_id is an int here).
            group_id = Many.group_id_from_group_name(db, grp_nm)
            log.debug('from_gml: resolved group_id %d from group_name "%s".'
                      % (group_id, grp_nm,))
        else:
            # BUG FIX: an explicitly supplied group_id was previously dropped
            # (the fcn. returned None, and the old unconditional log.debug
            # would also choke on '%d' % None). Pass the ID through instead.
            group_id = grp_id
        return group_id

    #
    @staticmethod
    def group_id_from_group_name(db, group_name, restrict_scope=None):
        """Resolve group_name to a group stack ID, optionally restricting the
        search to a single access scope.

        Tries shared groups first, then private groups (the private lookup
        matches group_name against usr.username -- presumably private groups
        are keyed by username; verify against callers), then the magic
        'Public' group. Raises GWIS_Warning when nothing matches.
        """
        group_id = None
        #
        if (((restrict_scope is None) or (restrict_scope == Access_Scope.shared))
                and (group_id is None)):
            group_id = Many.cp_group_shared_id(db, group_name)
            if group_id is None:
                # Well, it's not a shared group. If the user asked for it, complain
                if restrict_scope == Access_Scope.shared:
                    raise GWIS_Warning('No shared group for "%s"' % (group_name,))
        #
        if (((restrict_scope is None)
             or (restrict_scope == Access_Scope.private))
                and (group_id is None)):
            group_id = Many.cp_group_private_id(db, group_name)
            if group_id is None:
                if restrict_scope == Access_Scope.private:
                    # BUG FIX: this previously interpolated an undefined name,
                    # 'username', so a missing private group raised NameError
                    # instead of the intended GWIS_Warning.
                    raise GWIS_Warning('No private group for "%s"' % (group_name,))
        #
        if (((restrict_scope is None)
             or (restrict_scope == Access_Scope.public))
                and (group_id is None)
                # MAYBE: Is this okay? MAGIC_NAME: 'Public' user group.
                and (group_name == 'Public')):
            group_id = Many.public_group_id(db)
        #
        # MAYBE: Do we care about Many.stealth_group_id(db)?
        #
        if not group_id:
            raise GWIS_Warning('Named group not found or not permitted: "%s"'
                               % (group_name,))
        return group_id

    # ***
# ***
# ***
| |
import httplib as http
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed
def get_resource_object_member(error_key, context):
    """Return which JSON:API resource-object section a field error lives in.

    Looks up *error_key* among the view's declared serializer fields and
    returns 'relationships' for relationship fields, 'attributes' otherwise.
    """
    # Imported here (not at module scope) to dodge a circular import.
    from api.base.serializers import RelationshipField
    declared_fields = context['view'].serializer_class._declared_fields
    field = declared_fields.get(error_key, None)
    if isinstance(field, RelationshipField):
        return 'relationships'
    # If the field cannot be found (read/write operations may use different
    # serializers), assume the error belongs under 'attributes' by default.
    return 'attributes'
def dict_error_formatting(errors, context, index=None):
    """
    Formats all dictionary error messages for both single and bulk requests.

    :param dict errors: mapping of error key -> description (or list thereof)
    :param context: DRF view context; only consulted for unrecognized keys
        (to decide attributes vs. relationships)
    :param index: position of the item within a bulk request, or None for a
        single request (used to build the /data/{index}/... source pointer)
    :return list: JSON:API error objects
    """
    formatted_error_list = []
    # Error objects may have the following members. Title and id removed to avoid clash with "title" and "id" field errors.
    top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']
    # Resource objects must contain at least 'id' and 'type'
    resource_object_identifiers = ['type', 'id']
    if index is None:
        index = ''
    else:
        index = str(index) + '/'
    for error_key, error_description in errors.items():
        # Normalize a bare string description into a one-element list.
        if isinstance(error_description, basestring):
            error_description = [error_description]
        if error_key in top_level_error_keys:
            formatted_error_list.extend({error_key: description} for description in error_description)
        elif error_key in resource_object_identifiers:
            formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
        elif error_key == 'non_field_errors':
            # BUG FIX: this previously used a dict comprehension,
            # [{'detail': d for d in error_description}], which built a SINGLE
            # dict keeping only the last description. Emit one error object
            # per description instead.
            formatted_error_list.extend([{'detail': description} for description in error_description])
        else:
            formatted_error_list.extend([{'source': {'pointer': '/data/{}{}/'.format(index, get_resource_object_member(error_key, context)) + error_key}, 'detail': reason} for reason in error_description])
    return formatted_error_list
def json_api_exception_handler(exc, context):
    """
    Custom exception handler that returns errors object as an array
    """
    # We're deliberately not stripping html from exception detail.
    # This creates potential vulnerabilities to script injection attacks
    # when returning raw user input into error messages.
    #
    # Fortunately, Django's templating language strips markup by default,
    # but if our frontend changes we may lose that protection.
    # TODO: write tests to ensure our html frontend strips html

    # Import inside method to avoid errors when the OSF is loaded without Django
    from rest_framework.views import exception_handler
    response = exception_handler(exc, context)
    errors = []
    if response:
        message = response.data
        # Two-factor challenges advertise themselves via a response header in
        # addition to the error body.
        if isinstance(exc, TwoFactorRequiredError):
            response['X-OSF-OTP'] = 'required; app'
        # NOTE: branch order matters -- JSONAPIException carries its own
        # source/meta and bypasses the generic dict/list formatting below.
        if isinstance(exc, JSONAPIException):
            errors.extend([{'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}}])
        elif isinstance(message, dict):
            errors.extend(dict_error_formatting(message, context, index=None))
        else:
            # basestring: this module is Python 2 (see httplib import above).
            if isinstance(message, basestring):
                message = [message]
            # A list of messages is treated as a bulk request: each entry gets
            # its own /data/{index}/ source pointer.
            for index, error in enumerate(message):
                if isinstance(error, dict):
                    errors.extend(dict_error_formatting(error, context, index=index))
                else:
                    errors.append({'detail': error})
        response.data = {'errors': errors}
    return response
def format_validation_error(e):
    """Flatten a validation error's message_dict into human-readable strings.

    Uses only the first message recorded for each field.
    """
    return [
        'There was an issue with the {} field. {}'.format(field, messages[0])
        for field, messages in e.message_dict.items()
    ]
class EndpointNotImplementedError(APIException):
    # 501: the endpoint exists in the URL conf but has no implementation yet.
    status_code = status.HTTP_501_NOT_IMPLEMENTED
    default_detail = _('This endpoint is not yet implemented.')


class ServiceUnavailableError(APIException):
    # 503: a downstream dependency is temporarily unavailable.
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
    """Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects

    :param str detail: a human-readable explanation specific to this occurrence of the problem
    :param dict source: A dictionary containing references to the source of the error.
        See http://jsonapi.org/format/#error-objects.
        Example: ``source={'pointer': '/data/attributes/title'}``
    :param dict meta: A meta object containing non-standard meta info about the error.
    """
    status_code = status.HTTP_400_BAD_REQUEST

    def __init__(self, detail=None, source=None, meta=None):
        super(JSONAPIException, self).__init__(detail=detail)
        # Stored for json_api_exception_handler, which copies these into the
        # serialized error object.
        self.source = source
        self.meta = meta
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
    # 410: the resource existed but was deliberately removed.
    status_code = status.HTTP_410_GONE
    default_detail = ('The requested resource is no longer available.')


def UserGone(user):
    """Build a Gone error for a deactivated/removed user, carrying the user's
    public naming fields in the error meta."""
    return Gone(
        detail='The requested user is no longer available.',
        meta={
            'full_name': user.fullname, 'family_name': user.family_name, 'given_name': user.given_name,
            'middle_names': user.middle_names, 'profile_image': user.profile_image_url(),
        },
    )


class Conflict(JSONAPIException):
    # 409: request is valid but conflicts with current server state.
    status_code = status.HTTP_409_CONFLICT
    default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
    """JSONAPIException whose source points at a query-string parameter."""

    def __init__(self, detail=None, parameter=None):
        source = {
            'parameter': parameter,
        }
        super(JSONAPIParameterException, self).__init__(detail=detail, source=source)


class JSONAPIAttributeException(JSONAPIException):
    """JSONAPIException whose source points at a /data/attributes/<name> field."""

    def __init__(self, detail=None, attribute=None):
        source = {
            'pointer': '/data/attributes/{}'.format(attribute),
        }
        super(JSONAPIAttributeException, self).__init__(detail=detail, source=source)
class InvalidQueryStringError(JSONAPIParameterException):
    """Raised when client passes an invalid value to a query string parameter."""
    default_detail = 'Query string contains an invalid value.'
    status_code = http.BAD_REQUEST


class InvalidFilterOperator(JSONAPIParameterException):
    """Raised when client passes an invalid operator to a query param filter."""
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
        # When no explicit detail is given, synthesize one listing the
        # supported operators; the error source is always the 'filter' param.
        if value and not detail:
            valid_operators = ', '.join(valid_operators)
            detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
                value,
                valid_operators,
            )
        super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')
class InvalidFilterValue(JSONAPIParameterException):
    """Raised when client passes an invalid value to a query param filter."""
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, value=None, field_type=None):
        # Synthesize a default message unless the caller supplied one; the
        # optional field_type is folded in before the closing period.
        if not detail:
            type_suffix = (
                ' for a filter on type {0}'.format(field_type)
                if field_type else ''
            )
            detail = "Value '{0}' is not valid".format(value) + type_suffix + '.'
        super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')
class InvalidFilterError(JSONAPIParameterException):
    """Raised when client passes an malformed filter in the query string."""
    default_detail = _('Query string contains a malformed filter.')
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None):
        # Source is always the 'filter' query parameter.
        super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')


class InvalidFilterComparisonType(JSONAPIParameterException):
    """Raised when client tries to filter on a field that is not a date or number type"""
    default_detail = _('Comparison operators are only supported for dates and numbers.')
    status_code = http.BAD_REQUEST


class InvalidFilterMatchType(JSONAPIParameterException):
    """Raised when client tries to do a match filter on a field that is not a string or a list"""
    default_detail = _('Match operators are only supported for strings and lists.')
    status_code = http.BAD_REQUEST


class InvalidFilterFieldError(JSONAPIParameterException):
    """Raised when client tries to filter on a field that is not supported"""
    default_detail = _('Query contained one or more filters for invalid fields.')
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, parameter=None, value=None):
        # Synthesize a default message naming the offending field.
        if value and not detail:
            detail = "Value '{}' is not a filterable field.".format(value)
        super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
# Account-state errors: all reject API usage with a 400 and a state-specific
# message.
class UnconfirmedAccountError(APIException):
    status_code = 400
    default_detail = _('Please confirm your account before using the API.')


class UnclaimedAccountError(APIException):
    status_code = 400
    default_detail = _('Please claim your account before using the API.')


class DeactivatedAccountError(APIException):
    status_code = 400
    default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')


class MergedAccountError(APIException):
    status_code = 400
    default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')


class InvalidAccountError(APIException):
    status_code = 400
    default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')


class TwoFactorRequiredError(AuthenticationFailed):
    # Detected specially in json_api_exception_handler, which adds the
    # X-OSF-OTP response header.
    default_detail = _('Must specify two-factor authentication OTP code.')
    pass
class InvalidModelValueError(JSONAPIException):
    # Wraps model-layer ValidationErrors as a 400 JSON:API error.
    status_code = 400
    default_detail = _('Invalid value in POST/PUT/PATCH request.')


class TargetNotSupportedError(Exception):
    """Raised if a TargetField is used for a resource that isn't supported."""
    pass


class RelationshipPostMakesNoChanges(Exception):
    """Raised when a post is on a relationship that already exists, so view can return a 204"""
    pass


class NonDescendantNodeError(APIException):
    """Raised when a client attempts to associate a non-descendant node with a view only link"""
    status_code = 400
    default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')

    def __init__(self, node_id, detail=None):
        # Interpolate the offending node ID into the default message unless a
        # custom detail was supplied.
        if not detail:
            detail = self.default_detail.format(node_id)
        super(NonDescendantNodeError, self).__init__(detail=detail)
| |
from django.db import IntegrityError
from rest_framework import exceptions
from rest_framework import serializers as ser
from osf.models import AbstractNode, Node, Collection, Guid, Registration, CollectionProvider
from osf.exceptions import ValidationError, NodeStateError
from api.base.serializers import LinksField, RelationshipField, LinkedNodesRelationshipSerializer, LinkedRegistrationsRelationshipSerializer, LinkedPreprintsRelationshipSerializer
from api.base.serializers import JSONAPISerializer, IDField, TypeField, VersionedDateTimeField
from api.base.exceptions import InvalidModelValueError, RelationshipPostMakesNoChanges
from api.base.utils import absolute_reverse, get_user_auth
from api.nodes.serializers import NodeLinksSerializer
from api.taxonomies.serializers import TaxonomizableSerializerMixin
from framework.exceptions import PermissionsError
from osf.utils.permissions import WRITE
class CollectionProviderRelationshipField(RelationshipField):
    """Relationship field that deserializes a provider ID into a
    CollectionProvider instance."""

    def get_object(self, provider_id):
        # Returns None when no provider with this ID exists.
        return CollectionProvider.load(provider_id)

    def to_internal_value(self, data):
        provider = self.get_object(data)
        return {'provider': provider}
class GuidRelationshipField(RelationshipField):
    """Relationship field that deserializes a guid string into a Guid
    instance."""

    def get_object(self, _id):
        # Returns None when no Guid with this _id exists.
        return Guid.load(_id)

    def to_internal_value(self, data):
        guid = self.get_object(data)
        return {'guid': guid}
class CollectionSerializer(JSONAPISerializer):
    """JSON:API serializer for Collection objects: core fields, choice lists,
    and relationships to providers, linked nodes/registrations/preprints, and
    collected metadata."""

    filterable_fields = frozenset([
        'title',
        'date_created',
        'date_modified',
    ])

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    title = ser.CharField(required=True)
    date_created = VersionedDateTimeField(source='created', read_only=True)
    date_modified = VersionedDateTimeField(source='modified', read_only=True)
    bookmarks = ser.BooleanField(read_only=False, default=False, source='is_bookmark_collection')
    is_promoted = ser.BooleanField(read_only=True, default=False)
    is_public = ser.BooleanField(read_only=False, default=False)
    # Provider-configurable choice vocabularies for collected metadata.
    status_choices = ser.ListField(
        child=ser.CharField(max_length=127),
        default=list(),
    )
    collected_type_choices = ser.ListField(
        child=ser.CharField(max_length=127),
        default=list(),
    )
    volume_choices = ser.ListField(
        child=ser.CharField(max_length=127),
        default=list(),
    )
    issue_choices = ser.ListField(
        child=ser.CharField(max_length=127),
        default=list(),
    )
    program_area_choices = ser.ListField(
        child=ser.CharField(max_length=127),
        default=list(),
    )
    links = LinksField({})
    provider = CollectionProviderRelationshipField(
        related_view='providers:collection-providers:collection-provider-detail',
        related_view_kwargs={'provider_id': '<provider._id>'},
        read_only=True,
    )
    node_links = RelationshipField(
        related_view='collections:node-pointers',
        related_view_kwargs={'collection_id': '<_id>'},
        related_meta={'count': 'get_node_links_count'},
    )
    # TODO: Add a self link to this when it's available
    linked_nodes = RelationshipField(
        related_view='collections:linked-nodes',
        related_view_kwargs={'collection_id': '<_id>'},
        related_meta={'count': 'get_node_links_count'},
        self_view='collections:collection-node-pointer-relationship',
        self_view_kwargs={'collection_id': '<_id>'},
    )
    linked_registrations = RelationshipField(
        related_view='collections:linked-registrations',
        related_view_kwargs={'collection_id': '<_id>'},
        related_meta={'count': 'get_registration_links_count'},
        self_view='collections:collection-registration-pointer-relationship',
        self_view_kwargs={'collection_id': '<_id>'},
    )
    linked_preprints = RelationshipField(
        related_view='collections:linked-preprints',
        related_view_kwargs={'collection_id': '<_id>'},
        self_view='collections:collection-preprint-pointer-relationship',
        self_view_kwargs={'collection_id': '<_id>'},
        related_meta={'count': 'get_preprint_links_count'},
    )
    collected_metadata = RelationshipField(
        related_view='collections:collected-metadata-list',
        related_view_kwargs={'collection_id': '<_id>'},
    )

    class Meta:
        type_ = 'collections'

    def get_absolute_url(self, obj):
        return absolute_reverse(
            'collections:collection-detail', kwargs={
                'collection_id': obj._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def get_node_links_count(self, obj):
        # Counts only undeleted nodes visible to the requesting user.
        auth = get_user_auth(self.context['request'])
        node_ids = obj.guid_links.all().values_list('_id', flat=True)
        return Node.objects.filter(guids___id__in=node_ids, is_deleted=False).can_view(user=auth.user, private_link=auth.private_link).count()

    def get_registration_links_count(self, obj):
        # Counts only undeleted registrations visible to the requesting user.
        auth = get_user_auth(self.context['request'])
        registration_ids = obj.guid_links.all().values_list('_id', flat=True)
        return Registration.objects.filter(guids___id__in=registration_ids, is_deleted=False).can_view(user=auth.user, private_link=auth.private_link).count()

    def get_preprint_links_count(self, obj):
        auth = get_user_auth(self.context['request'])
        return self.context['view'].collection_preprints(obj, auth.user).count()

    def create(self, validated_data):
        """Create a new Collection; model validation errors are surfaced as
        JSON:API errors."""
        node = Collection(**validated_data)
        node.category = ''
        try:
            node.save()
        except ValidationError as e:
            raise InvalidModelValueError(detail=e.messages[0])
        except IntegrityError:
            # The model enforces a single bookmark collection per user.
            raise ser.ValidationError('Each user cannot have more than one Bookmark collection.')
        return node

    def update(self, collection, validated_data):
        """Update instance with the validated data.
        """
        assert isinstance(collection, Collection), 'collection must be a Collection'
        if validated_data:
            for key, value in validated_data.items():
                # Bookmark collections have a fixed title.
                if key == 'title' and collection.is_bookmark_collection:
                    raise InvalidModelValueError('Bookmark collections cannot be renamed.')
                setattr(collection, key, value)
        try:
            collection.save()
        except ValidationError as e:
            raise InvalidModelValueError(detail=e.messages[0])
        return collection
class CollectionDetailSerializer(CollectionSerializer):
    """
    Overrides CollectionSerializer to make id required.
    """
    id = IDField(source='_id', required=True)
class CollectionSubmissionSerializer(TaxonomizableSerializerMixin, JSONAPISerializer):
    """Serializer for a piece of collected metadata: a guid collected into a
    collection, with creator, subjects, and provider-defined choice fields."""

    class Meta:
        type_ = 'collected-metadata'

    filterable_fields = frozenset([
        'id',
        'collected_type',
        'date_created',
        'date_modified',
        'subjects',
        'status',
    ])
    id = IDField(source='guid._id', read_only=True)
    type = TypeField()
    creator = RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<creator._id>'},
    )
    collection = RelationshipField(
        related_view='collections:collection-detail',
        related_view_kwargs={'collection_id': '<collection._id>'},
    )
    guid = RelationshipField(
        related_view='guids:guid-detail',
        related_view_kwargs={'guids': '<guid._id>'},
        always_embed=True,
    )

    @property
    def subjects_related_view(self):
        # Overrides TaxonomizableSerializerMixin
        return 'collections:collected-metadata-subjects'

    @property
    def subjects_self_view(self):
        # Overrides TaxonomizableSerializerMixin
        return 'collections:collected-metadata-relationships-subjects'

    @property
    def subjects_view_kwargs(self):
        # Overrides TaxonomizableSerializerMixin
        return {'collection_id': '<collection._id>', 'cgm_id': '<guid._id>'}

    collected_type = ser.CharField(required=False)
    status = ser.CharField(required=False)
    volume = ser.CharField(required=False)
    issue = ser.CharField(required=False)
    program_area = ser.CharField(required=False)

    def get_absolute_url(self, obj):
        return absolute_reverse(
            'collected-metadata:collected-metadata-detail',
            kwargs={
                'collection_id': obj.collection._id,
                'cgm_id': obj.guid._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def update(self, obj, validated_data):
        """Apply whichever of subjects/status/collected_type/volume/issue/
        program_area were submitted, then save once."""
        if validated_data and 'subjects' in validated_data:
            auth = get_user_auth(self.context['request'])
            subjects = validated_data.pop('subjects', None)
            self.update_subjects(obj, subjects, auth)
        if 'status' in validated_data:
            obj.status = validated_data.pop('status')
        if 'collected_type' in validated_data:
            obj.collected_type = validated_data.pop('collected_type')
        if 'volume' in validated_data:
            obj.volume = validated_data.pop('volume')
        if 'issue' in validated_data:
            obj.issue = validated_data.pop('issue')
        if 'program_area' in validated_data:
            obj.program_area = validated_data.pop('program_area')
        obj.save()
        return obj
class CollectionSubmissionCreateSerializer(CollectionSubmissionSerializer):
    # Makes guid writeable only on create
    guid = GuidRelationshipField(
        related_view='guids:guid-detail',
        related_view_kwargs={'guids': '<guid._id>'},
        always_embed=True,
        read_only=False,
        required=True,
    )

    def create(self, validated_data):
        """Collect guid.referent into the collection, enforcing that the
        creator has write access to either the collection or the object."""
        subjects = validated_data.pop('subjects', None)
        collection = validated_data.pop('collection', None)
        creator = validated_data.pop('creator', None)
        guid = validated_data.pop('guid')
        if not collection:
            raise exceptions.ValidationError('"collection" must be specified.')
        if not creator:
            raise exceptions.ValidationError('"creator" must be specified.')
        # Write permission on the collection OR on the collected object itself
        # is sufficient (not every referent type has has_permission).
        if not (creator.has_perm('write_collection', collection) or (hasattr(guid.referent, 'has_permission') and guid.referent.has_permission(creator, WRITE))):
            raise exceptions.PermissionDenied('Must have write permission on either collection or collected object to collect.')
        try:
            obj = collection.collect_object(guid.referent, creator, **validated_data)
        except ValidationError as e:
            raise InvalidModelValueError(e.message)
        # Subjects are set after collection, since they need the created obj.
        if subjects:
            auth = get_user_auth(self.context['request'])
            try:
                obj.set_subjects(subjects, auth)
            except PermissionsError as e:
                raise exceptions.PermissionDenied(detail=str(e))
            except (ValueError, NodeStateError) as e:
                raise exceptions.ValidationError(detail=str(e))
        return obj
class CollectionNodeLinkSerializer(NodeLinksSerializer):
    """Node-link serializer specialized for collections: pointers resolve
    through the guid's referent."""

    target_node = RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<guid.referent._id>'},
        always_embed=True,
    )

    def get_absolute_url(self, obj):
        return absolute_reverse(
            'collections:node-pointer-detail',
            kwargs={
                'collection_id': self.context['request'].parser_context['kwargs']['collection_id'],
                'node_link_id': obj.guid._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    # Override NodeLinksSerializer
    def create(self, validated_data):
        """Collect the node identified by the posted id into this collection.

        Raises InvalidModelValueError when the node does not exist or is
        already pointed to by the collection.
        """
        request = self.context['request']
        user = request.user
        collection = self.context['view'].get_collection()
        target_node_id = validated_data['_id']
        pointer_node = AbstractNode.load(target_node_id)
        if not pointer_node:
            raise InvalidModelValueError(
                source={'pointer': '/data/relationships/node_links/data/id'},
                detail='Target Node \'{}\' not found.'.format(target_node_id),
            )
        try:
            pointer = collection.collect_object(pointer_node, user)
        except ValidationError:
            # collect_object raises when the node is already collected.
            raise InvalidModelValueError(
                source={'pointer': '/data/relationships/node_links/data/id'},
                detail='Target Node \'{}\' already pointed to by \'{}\'.'.format(target_node_id, collection._id),
            )
        return pointer
class CollectedAbstractNodeRelationshipSerializer(object):
    """Mixin implementing create/update for collection<->node relationship
    endpoints; subclasses set _abstract_node_subclass (Node, Registration)."""

    _abstract_node_subclass = None

    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object
        return {
            'data':
            list(self._abstract_node_subclass.objects.filter(
                guids__in=obj.guid_links.all(), is_deleted=False,
            )),
            'self': obj,
        }

    def update(self, instance, validated_data):
        """PUT semantics: make the collection's pointers match exactly the
        posted set (removing and adding as needed)."""
        collection = instance['self']
        auth = get_user_auth(self.context['request'])
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        for pointer in remove:
            collection.remove_object(pointer)
        for node in add:
            collection.collect_object(node, auth.user)
        return self.make_instance_obj(collection)

    def create(self, validated_data):
        """POST semantics: only add pointers; a no-op POST raises
        RelationshipPostMakesNoChanges so the view can return 204."""
        instance = self.context['view'].get_object()
        auth = get_user_auth(self.context['request'])
        collection = instance['self']
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        if not len(add):
            raise RelationshipPostMakesNoChanges
        for node in add:
            try:
                collection.collect_object(node, auth.user)
            except ValidationError as e:
                raise InvalidModelValueError(
                    source={'pointer': '/data/relationships/node_links/data/id'},
                    detail='Target Node {} generated error: {}.'.format(node._id, e.message),
                )
        return self.make_instance_obj(collection)
class CollectedNodeRelationshipSerializer(CollectedAbstractNodeRelationshipSerializer, LinkedNodesRelationshipSerializer):
    # Node-flavored concrete relationship serializer.
    _abstract_node_subclass = Node


class CollectedRegistrationsRelationshipSerializer(CollectedAbstractNodeRelationshipSerializer, LinkedRegistrationsRelationshipSerializer):
    # Registration-flavored concrete relationship serializer.
    _abstract_node_subclass = Registration


class CollectedPreprintsRelationshipSerializer(CollectedAbstractNodeRelationshipSerializer, LinkedPreprintsRelationshipSerializer):
    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object.
        # Preprints are fetched through the view rather than a model filter.
        return {
            'data':
            list(self.context['view'].collection_preprints(obj, user=get_user_auth(self.context['request']).user)),
            'self': obj,
        }
| |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
from tracing.proto import histogram_proto
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import date_range
from tracing.value.diagnostics import diagnostic_ref
from tracing.value.diagnostics import generic_set
def _AddHist(hist_set, name=None, unit=None):
    """Append a histogram proto to hist_set, defaulting name to '_' and the
    unit to milliseconds, and return the new entry."""
    new_hist = hist_set.histograms.add()
    new_hist.name = name if name else '_'
    new_hist.unit.unit = unit if unit else histogram_proto.Pb2().MS
    return new_hist
class HistogramSetUnittest(unittest.TestCase):
def testGetSharedDiagnosticsOfType(self):
    """GetSharedDiagnosticsOfType returns only diagnostics of the given class."""
    d0 = generic_set.GenericSet(['foo'])
    d1 = date_range.DateRange(0)
    hs = histogram_set.HistogramSet()
    hs.AddSharedDiagnosticToAllHistograms('generic', d0)
    hs.AddSharedDiagnosticToAllHistograms('generic', d1)
    # Two diagnostics were added, but only d0 is a GenericSet.
    diagnostics = hs.GetSharedDiagnosticsOfType(generic_set.GenericSet)
    self.assertEqual(len(diagnostics), 1)
    self.assertIsInstance(diagnostics[0], generic_set.GenericSet)
def testImportDicts(self):
    """AsDicts/ImportDicts round-trips a histogram set."""
    hist = histogram.Histogram('', 'unitless')
    hists = histogram_set.HistogramSet([hist])
    hists2 = histogram_set.HistogramSet()
    hists2.ImportDicts(hists.AsDicts())
    self.assertEqual(len(hists), len(hists2))
def testAssertType(self):
    """ImportDicts rejects dicts with an empty/unknown 'type'."""
    hs = histogram_set.HistogramSet()
    with self.assertRaises(AssertionError):
        hs.ImportDicts([{'type': ''}])
def testIgnoreTagMap(self):
    """ImportDicts silently skips legacy 'TagMap' entries (no exception)."""
    histogram_set.HistogramSet().ImportDicts([{'type': 'TagMap'}])
def testFilterHistogram(self):
    """FilterHistograms removes the histograms matched by the predicate
    (here 'b'), leaving the rest."""
    a = histogram.Histogram('a', 'unitless')
    b = histogram.Histogram('b', 'unitless')
    c = histogram.Histogram('c', 'unitless')
    hs = histogram_set.HistogramSet([a, b, c])
    hs.FilterHistograms(lambda h: h.name == 'b')
    names = set(['a', 'c'])
    for h in hs:
        self.assertIn(h.name, names)
        names.remove(h.name)
    # Every expected survivor was seen exactly once.
    self.assertEqual(0, len(names))
def testRemoveOrphanedDiagnostics(self):
    """RemoveOrphanedDiagnostics drops shared diagnostics no longer
    referenced by any remaining histogram."""
    da = generic_set.GenericSet(['a'])
    db = generic_set.GenericSet(['b'])
    a = histogram.Histogram('a', 'unitless')
    b = histogram.Histogram('b', 'unitless')
    hs = histogram_set.HistogramSet([a])
    hs.AddSharedDiagnosticToAllHistograms('a', da)
    hs.AddHistogram(b)
    hs.AddSharedDiagnosticToAllHistograms('b', db)
    # Filter out 'a', leaving histogram 'b' plus both shared diagnostics.
    hs.FilterHistograms(lambda h: h.name == 'a')
    dicts = hs.AsDicts()
    self.assertEqual(3, len(dicts))
    # After cleanup only 'b' and its diagnostic remain.
    hs.RemoveOrphanedDiagnostics()
    dicts = hs.AsDicts()
    self.assertEqual(2, len(dicts))
def testAddSharedDiagnostic(self):
    """A diagnostic registered via AddSharedDiagnostic is serialized once and
    referenced by guid from the histograms that use it."""
    diags = {}
    da = generic_set.GenericSet(['a'])
    db = generic_set.GenericSet(['b'])
    diags['da'] = da
    diags['db'] = db
    a = histogram.Histogram('a', 'unitless')
    b = histogram.Histogram('b', 'unitless')
    hs = histogram_set.HistogramSet()
    hs.AddSharedDiagnostic(da)
    hs.AddHistogram(a, {'da': da})
    hs.AddHistogram(b, {'db': db})
    # This should produce one shared diagnostic and 2 histograms.
    dicts = hs.AsDicts()
    self.assertEqual(3, len(dicts))
    self.assertEqual(da.AsDict(), dicts[0])
    # Assert that you only see the shared diagnostic once.
    seen_once = False
    for idx, val in enumerate(dicts):
        if idx == 0:
            continue
        if 'da' in val['diagnostics']:
            self.assertFalse(seen_once)
            self.assertEqual(val['diagnostics']['da'], da.guid)
            seen_once = True
  def testMerge(self):
    """Merge combines histograms while keeping each one's own diagnostics."""
    hs1 = histogram_set.HistogramSet([histogram.Histogram('a', 'unitless')])
    hs1.AddSharedDiagnosticToAllHistograms('name',
                                           generic_set.GenericSet(['diag1']))
    hs2 = histogram_set.HistogramSet([histogram.Histogram('b', 'unitless')])
    hs2.AddSharedDiagnosticToAllHistograms('name',
                                           generic_set.GenericSet(['diag2']))
    hs1.Merge(hs2)
    # Both histograms and both shared diagnostics end up in hs1.
    self.assertEqual(len(hs1), 2)
    self.assertEqual(len(hs1.shared_diagnostics), 2)
    # Each histogram keeps the 'name' diagnostic it was given before Merge.
    self.assertEqual(hs1.GetHistogramNamed('a').diagnostics['name'],
                     generic_set.GenericSet(['diag1']))
    self.assertEqual(hs1.GetHistogramNamed('b').diagnostics['name'],
                     generic_set.GenericSet(['diag2']))
  def testSharedDiagnostic(self):
    """Round-trips a histogram with one shared diagnostic through AsDicts."""
    hist = histogram.Histogram('', 'unitless')
    hists = histogram_set.HistogramSet([hist])
    diag = generic_set.GenericSet(['shared'])
    hists.AddSharedDiagnosticToAllHistograms('generic', diag)
    # Serializing a single Histogram with a single shared diagnostic should
    # produce 2 dicts.
    ds = hists.AsDicts()
    self.assertEqual(len(ds), 2)
    self.assertEqual(diag.AsDict(), ds[0])
    # The serialized Histogram should refer to the shared diagnostic by its
    # guid.
    self.assertEqual(ds[1]['diagnostics']['generic'], diag.guid)
    # Deserialize ds.
    hists2 = histogram_set.HistogramSet()
    hists2.ImportDicts(ds)
    self.assertEqual(len(hists2), 1)
    hist2 = [h for h in hists2][0]
    # The guid reference is resolved back into a real GenericSet with the
    # same contents as the original diagnostic.
    self.assertIsInstance(
        hist2.diagnostics.get('generic'), generic_set.GenericSet)
    self.assertEqual(list(diag), list(hist2.diagnostics.get('generic')))
def testReplaceSharedDiagnostic(self):
hist = histogram.Histogram('', 'unitless')
hists = histogram_set.HistogramSet([hist])
diag0 = generic_set.GenericSet(['shared0'])
diag1 = generic_set.GenericSet(['shared1'])
hists.AddSharedDiagnosticToAllHistograms('generic0', diag0)
hists.AddSharedDiagnosticToAllHistograms('generic1', diag1)
guid0 = diag0.guid
guid1 = diag1.guid
hists.ReplaceSharedDiagnostic(
guid0, diagnostic_ref.DiagnosticRef('fakeGuid'))
self.assertEqual(hist.diagnostics['generic0'].guid, 'fakeGuid')
self.assertEqual(hist.diagnostics['generic1'].guid, guid1)
def testReplaceSharedDiagnostic_NonRefAddsToMap(self):
hist = histogram.Histogram('', 'unitless')
hists = histogram_set.HistogramSet([hist])
diag0 = generic_set.GenericSet(['shared0'])
diag1 = generic_set.GenericSet(['shared1'])
hists.AddSharedDiagnosticToAllHistograms('generic0', diag0)
guid0 = diag0.guid
guid1 = diag1.guid
hists.ReplaceSharedDiagnostic(guid0, diag1)
self.assertIsNotNone(hists.LookupDiagnostic(guid1))
  def testDeduplicateDiagnostics(self):
    """DeduplicateDiagnostics makes equal diagnostics share a single guid."""
    generic_a = generic_set.GenericSet(['A'])
    generic_b = generic_set.GenericSet(['B'])
    date_a = date_range.DateRange(42)
    date_b = date_range.DateRange(57)
    a_hist = histogram.Histogram('a', 'unitless')
    # Build equal-but-distinct copies of the same diagnostics for a and b.
    generic0 = generic_set.GenericSet.FromDict(generic_a.AsDict())
    generic0.AddDiagnostic(generic_b)
    a_hist.diagnostics['generic'] = generic0
    date0 = date_range.DateRange.FromDict(date_a.AsDict())
    date0.AddDiagnostic(date_b)
    a_hist.diagnostics['date'] = date0
    b_hist = histogram.Histogram('b', 'unitless')
    generic1 = generic_set.GenericSet.FromDict(generic_a.AsDict())
    generic1.AddDiagnostic(generic_b)
    b_hist.diagnostics['generic'] = generic1
    date1 = date_range.DateRange.FromDict(date_a.AsDict())
    date1.AddDiagnostic(date_b)
    b_hist.diagnostics['date'] = date1
    c_hist = histogram.Histogram('c', 'unitless')
    # c shares the very same object as b, so their guids already match.
    c_hist.diagnostics['generic'] = generic1
    histograms = histogram_set.HistogramSet([a_hist, b_hist, c_hist])
    # Before deduplication: a and b carry equal diagnostics with different
    # guids; b and c share one object (same guid).
    self.assertNotEqual(
        a_hist.diagnostics['generic'].guid, b_hist.diagnostics['generic'].guid)
    self.assertEqual(
        b_hist.diagnostics['generic'].guid, c_hist.diagnostics['generic'].guid)
    self.assertEqual(
        a_hist.diagnostics['generic'], b_hist.diagnostics['generic'])
    self.assertNotEqual(
        a_hist.diagnostics['date'].guid, b_hist.diagnostics['date'].guid)
    self.assertEqual(
        a_hist.diagnostics['date'], b_hist.diagnostics['date'])
    histograms.DeduplicateDiagnostics()
    # After deduplication, equal diagnostics share one guid.
    self.assertEqual(
        a_hist.diagnostics['generic'].guid, b_hist.diagnostics['generic'].guid)
    self.assertEqual(
        b_hist.diagnostics['generic'].guid, c_hist.diagnostics['generic'].guid)
    self.assertEqual(
        a_hist.diagnostics['generic'], b_hist.diagnostics['generic'])
    self.assertEqual(
        a_hist.diagnostics['date'].guid, b_hist.diagnostics['date'].guid)
    self.assertEqual(
        a_hist.diagnostics['date'], b_hist.diagnostics['date'])
    histogram_dicts = histograms.AsDicts()
    # All diagnostics should have been serialized as DiagnosticRefs.
    for d in histogram_dicts:
      if 'type' not in d:
        for diagnostic_dict in d['diagnostics'].values():
          self.assertIsInstance(diagnostic_dict, str)
    # Round-trip through dicts must preserve the deduplicated sharing.
    histograms2 = histogram_set.HistogramSet()
    histograms2.ImportDicts(histograms.AsDicts())
    a_hists = histograms2.GetHistogramsNamed('a')
    self.assertEqual(len(a_hists), 1)
    a_hist2 = a_hists[0]
    b_hists = histograms2.GetHistogramsNamed('b')
    self.assertEqual(len(b_hists), 1)
    b_hist2 = b_hists[0]
    self.assertEqual(
        a_hist2.diagnostics['generic'].guid,
        b_hist2.diagnostics['generic'].guid)
    self.assertEqual(
        a_hist2.diagnostics['generic'],
        b_hist2.diagnostics['generic'])
    self.assertEqual(
        a_hist2.diagnostics['date'].guid,
        b_hist2.diagnostics['date'].guid)
    self.assertEqual(
        a_hist2.diagnostics['date'],
        b_hist2.diagnostics['date'])
def testBasicImportFromProto(self):
hist_set = histogram_proto.Pb2().HistogramSet()
hist = hist_set.histograms.add()
hist.name = 'metric1'
hist.unit.unit = histogram_proto.Pb2().TS_MS
hist = hist_set.histograms.add()
hist.name = 'metric2'
hist.unit.unit = histogram_proto.Pb2().SIGMA
hist.unit.improvement_direction = histogram_proto.Pb2().BIGGER_IS_BETTER
parsed = histogram_set.HistogramSet()
parsed.ImportProto(hist_set.SerializeToString())
hists = list(parsed)
# The order of the histograms isn't guaranteed.
self.assertEqual(len(hists), 2)
self.assertItemsEqual(
[hists[0].name, hists[1].name], ['metric1', 'metric2'])
self.assertItemsEqual(
[hists[0].unit, hists[1].unit], ['tsMs', 'sigma_biggerIsBetter'])
  def testSimpleFieldsFromProto(self):
    """Scalar proto fields survive the proto round-trip unchanged."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = _AddHist(hist_set)
    hist.description = 'description!'
    hist.sample_values.append(21)
    hist.sample_values.append(22)
    hist.sample_values.append(23)
    hist.max_num_sample_values = 3
    hist.num_nans = 1
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    self.assertEqual(parsed_hist.description, 'description!')
    self.assertEqual(parsed_hist.sample_values, [21, 22, 23])
    self.assertEqual(parsed_hist.max_num_sample_values, 3)
    self.assertEqual(parsed_hist.num_nans, 1)
  def testRaisesOnMissingMandatoryFieldsInProto(self):
    """ImportProto raises ValueError when name or unit is missing."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = hist_set.histograms.add()
    with self.assertRaises(ValueError):
      # Missing name.
      parsed = histogram_set.HistogramSet()
      parsed.ImportProto(hist_set.SerializeToString())
    with self.assertRaises(ValueError):
      # Missing unit.
      hist.name = "eh"
      # NOTE(review): reuses `parsed` bound inside the previous with-block;
      # the assignment runs before ImportProto raises, so this is safe.
      parsed.ImportProto(hist_set.SerializeToString())
  def testMinimalBinBoundsInProto(self):
    """A lone first_bin_boundary serializes to a one-element bound list."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = _AddHist(hist_set)
    hist.bin_boundaries.first_bin_boundary = 1
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    # The transport format for bins is relatively easily understood, whereas
    # how bins are generated is very complex, so use the former for the bin
    # bounds tests. See the histogram spec in docs/histogram-set-json-format.md.
    dict_format = parsed_hist.AsDict()['binBoundaries']
    self.assertEqual(dict_format, [1])
  def testComplexBinBounds(self):
    """Mixed bin specs serialize to the documented transport format."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = _AddHist(hist_set)
    hist.bin_boundaries.first_bin_boundary = 17
    # Plain boundary.
    spec1 = hist.bin_boundaries.bin_specs.add()
    spec1.bin_boundary = 18
    # Exponential spec -> serialized as [1, max, count].
    spec2 = hist.bin_boundaries.bin_specs.add()
    spec2.bin_spec.boundary_type = (
        histogram_proto.Pb2().BinBoundaryDetailedSpec.EXPONENTIAL)
    spec2.bin_spec.maximum_bin_boundary = 19
    spec2.bin_spec.num_bin_boundaries = 20
    # Linear spec -> serialized as [0, max, count].
    spec3 = hist.bin_boundaries.bin_specs.add()
    spec3.bin_spec.boundary_type = (
        histogram_proto.Pb2().BinBoundaryDetailedSpec.LINEAR)
    spec3.bin_spec.maximum_bin_boundary = 21
    spec3.bin_spec.num_bin_boundaries = 22
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    dict_format = parsed_hist.AsDict()['binBoundaries']
    self.assertEqual(dict_format, [17, 18, [1, 19, 20], [0, 21, 22]])
  def testImportRunningStatisticsFromProto(self):
    """Running-statistics fields map onto the parsed histogram's stats."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = _AddHist(hist_set)
    hist.running.count = 4
    hist.running.max = 23
    hist.running.meanlogs = 1
    hist.running.mean = 22
    hist.running.min = 21
    hist.running.sum = 66
    hist.running.variance = 1
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    # We get at meanlogs through geometric_mean. Variance is after Bessel's
    # correction has been applied.
    self.assertEqual(parsed_hist.running.count, 4)
    self.assertEqual(parsed_hist.running.max, 23)
    self.assertEqual(parsed_hist.running.geometric_mean, math.exp(1))
    self.assertEqual(parsed_hist.running.mean, 22)
    self.assertEqual(parsed_hist.running.min, 21)
    self.assertEqual(parsed_hist.running.sum, 66)
    self.assertAlmostEqual(parsed_hist.running.variance, 0.3333333333)
  def testImportAllBinsFromProto(self):
    """Per-bin diagnostic maps survive the proto import."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = _AddHist(hist_set)
    hist.all_bins[0].bin_count = 24
    # Two separate diagnostic maps on the first bin, one entry each.
    map1 = hist.all_bins[0].diagnostic_maps.add().diagnostic_map
    map1['some bin diagnostic'].generic_set.values.append('"some value"')
    map2 = hist.all_bins[0].diagnostic_maps.add().diagnostic_map
    map2['other bin diagnostic'].generic_set.values.append('"some other value"')
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    self.assertGreater(len(parsed_hist.bins), 1)
    self.assertEqual(len(parsed_hist.bins[0].diagnostic_maps), 2)
    self.assertEqual(len(parsed_hist.bins[0].diagnostic_maps[0]), 1)
    self.assertEqual(len(parsed_hist.bins[0].diagnostic_maps[1]), 1)
    # The JSON-encoded proto values decode into plain GenericSets.
    self.assertEqual(
        parsed_hist.bins[0].diagnostic_maps[0]['some bin diagnostic'],
        generic_set.GenericSet(values=['some value']))
    self.assertEqual(
        parsed_hist.bins[0].diagnostic_maps[1]['other bin diagnostic'],
        generic_set.GenericSet(values=['some other value']))
  def testSummaryOptionsFromProto(self):
    """Summary options (including percentiles) import from proto."""
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist = _AddHist(hist_set)
    hist.summary_options.avg = True
    hist.summary_options.nans = True
    hist.summary_options.geometric_mean = True
    hist.summary_options.percentile.append(0.90)
    hist.summary_options.percentile.append(0.95)
    hist.summary_options.percentile.append(0.99)
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    # See the histogram spec in docs/histogram-set-json-format.md.
    # Serializing to proto leads to funny rounding errors.
    self.assertEqual(
        parsed_hist.statistics_names,
        set(['pct_099_0000009537', 'pct_089_9999976158', 'pct_094_9999988079',
             'nans', 'avg', 'geometricMean']),
        msg=str(parsed_hist.statistics_names))
  def testImportSharedDiagnosticsFromProto(self):
    """Shared diagnostics referenced by guid are resolved on import."""
    guid1 = 'f7f17394-fa4a-481e-86bd-a82cd55935a7'
    guid2 = '88ea36c7-6dcb-4ba8-ba56-1979de05e16f'
    hist_set = histogram_proto.Pb2().HistogramSet()
    hist_set.shared_diagnostics[guid1].generic_set.values.append(
        '"webrtc_perf_tests"')
    hist_set.shared_diagnostics[guid2].generic_set.values.append('123456')
    # An extra shared diagnostic nothing refers to.
    hist_set.shared_diagnostics['whatever'].generic_set.values.append('2')
    hist = hist_set.histograms.add()
    hist.name = "_"
    hist.unit.unit = histogram_proto.Pb2().MS
    hist.diagnostics.diagnostic_map['bots'].shared_diagnostic_guid = guid1
    hist.diagnostics.diagnostic_map['pointId'].shared_diagnostic_guid = guid2
    parsed = histogram_set.HistogramSet()
    parsed.ImportProto(hist_set.SerializeToString())
    parsed_hist = parsed.GetFirstHistogram()
    self.assertIsNotNone(parsed_hist)
    # Only the two referenced diagnostics land on the histogram; JSON-encoded
    # values decode to their native types (int and str).
    self.assertEqual(len(parsed_hist.diagnostics), 2)
    self.assertEqual(parsed_hist.diagnostics['pointId'],
                     generic_set.GenericSet(values=[123456]))
    self.assertEqual(parsed_hist.diagnostics['bots'],
                     generic_set.GenericSet(values=['webrtc_perf_tests']))
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Attempt to check each interface in nipype
"""
from __future__ import print_function
from builtins import object
# Stdlib imports
import os
import re
import sys
import warnings
from nipype.interfaces.base import BaseInterface
from nipype.external.six import string_types
# Functions and classes
class InterfaceChecker(object):
    """Walk a package tree and validate nipype interface input/output specs.

    Also (re)generates ``test_auto_*.py`` regression tests next to each
    interface module, so running this has filesystem side effects.
    """

    def __init__(self,
                 package_name,
                 package_skip_patterns=None,
                 module_skip_patterns=None,
                 class_skip_patterns=None
                 ):
        ''' Initialize package for parsing

        Parameters
        ----------
        package_name : string
            Name of the top-level package. *package_name* must be the
            name of an importable package
        package_skip_patterns : None or sequence of {strings, regexps}
            Sequence of strings giving URIs of packages to be excluded
            Operates on the package path, starting at (including) the
            first dot in the package path, after *package_name* - so,
            if *package_name* is ``sphinx``, then ``sphinx.util`` will
            result in ``.util`` being passed for searching by these
            regexps. If is None, gives default. Default is:
            ['\\.tests$']
        module_skip_patterns : None or sequence
            Sequence of strings giving URIs of modules to be excluded
            Operates on the module name including preceding URI path,
            back to the first dot after *package_name*. For example
            ``sphinx.util.console`` results in the string to search of
            ``.util.console``
            If is None, gives default. Default is:
            ['\\.setup$', '\\._']
        class_skip_patterns : None or sequence
            Sequence of strings giving classes to be excluded
            Default is: None
        '''
        if package_skip_patterns is None:
            package_skip_patterns = ['\\.tests$']
        if module_skip_patterns is None:
            module_skip_patterns = ['\\.setup$', '\\._']
        # NOTE(review): an explicitly-passed empty list also takes the
        # fallback branch here; the result ([]) is the same either way.
        if class_skip_patterns:
            self.class_skip_patterns = class_skip_patterns
        else:
            self.class_skip_patterns = []
        # Assigning package_name goes through the property below, which
        # imports the package and records its filesystem root.
        self.package_name = package_name
        self.package_skip_patterns = package_skip_patterns
        self.module_skip_patterns = module_skip_patterns

    def get_package_name(self):
        # Accessor backing the `package_name` property defined below.
        return self._package_name

    def set_package_name(self, package_name):
        """Set package_name"""
        # It's also possible to imagine caching the module parsing here
        self._package_name = package_name
        # Import the package to locate its directory on disk.
        self.root_module = __import__(package_name)
        self.root_path = self.root_module.__path__[0]

    package_name = property(get_package_name, set_package_name, None,
                            'get/set package_name')

    def _get_object_name(self, line):
        # Pull the identifier out of a `def name(...)` / `class Name(...)` line.
        name = line.split()[1].split('(')[0].strip()
        # in case we have classes which are not derived from object
        # ie. old style classes
        return name.rstrip(':')

    def _uri2path(self, uri):
        """Convert uri to absolute filepath

        Parameters
        ----------
        uri : string
            URI of python module to return path for

        Returns
        -------
        path : None or string
            Returns None if there is no valid path for this URI
            Otherwise returns absolute file system path for URI
        """
        if uri == self.package_name:
            return os.path.join(self.root_path, '__init__.py')
        path = uri.replace('.', os.path.sep)
        path = path.replace(self.package_name + os.path.sep, '')
        path = os.path.join(self.root_path, path)
        # XXX maybe check for extensions as well?
        if os.path.exists(path + '.py'):  # file
            path += '.py'
        elif os.path.exists(os.path.join(path, '__init__.py')):  # package dir
            path = os.path.join(path, '__init__.py')
        else:
            return None
        return path

    def _path2uri(self, dirpath):
        ''' Convert directory path to uri '''
        relpath = dirpath.replace(self.root_path, self.package_name)
        if relpath.startswith(os.path.sep):
            relpath = relpath[1:]
        return relpath.replace(os.path.sep, '.')

    def _parse_module(self, uri):
        ''' Parse module defined in *uri* '''
        filename = self._uri2path(uri)
        if filename is None:
            # nothing that we could handle here.
            return ([], [])
        f = open(filename, 'rt')
        functions, classes = self._parse_lines(f, uri)
        f.close()
        return functions, classes

    def _parse_lines(self, linesource, module):
        ''' Parse lines of text for functions and classes '''
        # Purely textual scan: only top-level (column 0) defs/classes match.
        functions = []
        classes = []
        for line in linesource:
            if line.startswith('def ') and line.count('('):
                # exclude private stuff
                name = self._get_object_name(line)
                if not name.startswith('_'):
                    functions.append(name)
            elif line.startswith('class '):
                # exclude private stuff
                name = self._get_object_name(line)
                if not name.startswith('_') and \
                        self._survives_exclude('.'.join((module, name)),
                                               'class'):
                    classes.append(name)
            else:
                pass
        functions.sort()
        classes.sort()
        return functions, classes

    def test_specs(self, uri):
        """Check input and output specs in an uri

        Parameters
        ----------
        uri : string
            python location of module - e.g 'sphinx.builder'

        Returns
        -------
        bad_specs : list of [uri, class, spec-kind, trait, offending-key]
            entries, one per disallowed metadata key found
        """
        # get the names of all classes and functions
        _, classes = self._parse_module(uri)
        if not classes:
            # print 'WARNING: Empty -',uri  # dbg
            return None
        # Make a shorter version of the uri that omits the package name for
        # titles
        # NOTE(review): uri_short is computed but never used below.
        uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
        # NOTE(review): 'desc' appears twice in this list.
        allowed_keys = ['desc', 'genfile', 'xor', 'requires', 'desc',
                        'nohash', 'argstr', 'position', 'mandatory',
                        'copyfile', 'usedefault', 'sep', 'hash_files',
                        'deprecated', 'new_name', 'min_ver', 'max_ver',
                        'name_source', 'name_template', 'keep_extension',
                        'units', 'output_name']
        in_built = ['type', 'copy', 'parent', 'instance_handler',
                    'comparison_mode', 'array', 'default', 'editor']
        bad_specs = []
        for c in classes:
            __import__(uri)
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    classinst = sys.modules[uri].__dict__[c]
            # NOTE(review): broad catch silently skips any class that fails
            # to resolve; `inst` is bound but unused.
            except Exception as inst:
                continue
            if not issubclass(classinst, BaseInterface):
                continue
            # Auto-generated tests go in a `tests` dir next to the module;
            # created relative to the current working directory.
            testdir = os.path.join(*(uri.split('.')[:-1] + ['tests']))
            if not os.path.exists(testdir):
                os.makedirs(testdir)
            nonautotest = os.path.join(testdir, 'test_%s.py' % c)
            testfile = os.path.join(testdir, 'test_auto_%s.py' % c)
            if os.path.exists(testfile):
                os.unlink(testfile)
            # Only regenerate the auto test when no hand-written test exists.
            if not os.path.exists(nonautotest):
                with open(testfile, 'wt') as fp:
                    cmd = ['# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT',
                           'from %stesting import assert_equal' %
                           ('.' * len(uri.split('.'))),
                           'from ..%s import %s' % (uri.split('.')[-1], c),
                           '']
                    cmd.append('\ndef test_%s_inputs():' % c)
                    input_fields = ''
                    for traitname, trait in sorted(classinst.input_spec().traits(transient=None).items()):
                        input_fields += '%s=dict(' % traitname
                        for key, value in sorted(trait.__dict__.items()):
                            if key in in_built or key == 'desc':
                                continue
                            if isinstance(value, string_types):
                                # Switch quote style when the value itself
                                # contains a single quote.
                                quote = "'"
                                if "'" in value:
                                    quote = '"'
                                input_fields += "%s=%s%s%s,\n    " % (key, quote,
                                                                     value, quote)
                            else:
                                input_fields += "%s=%s,\n    " % (key, value)
                        input_fields += '),\n    '
                    cmd += ['    input_map = dict(%s)' % input_fields]
                    cmd += ['    inputs = %s.input_spec()' % c]
                    cmd += ["""
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value"""]
                    fp.writelines('\n'.join(cmd) + '\n\n')
            else:
                print('%s has nonautotest' % c)
            # Validate input-spec metadata keys against the allowed set.
            for traitname, trait in sorted(classinst.input_spec().traits(transient=None).items()):
                for key in sorted(trait.__dict__):
                    if key in in_built:
                        continue
                    parent_metadata = []
                    if 'parent' in trait.__dict__:
                        parent_metadata = list(getattr(trait, 'parent').__dict__.keys())
                    if key not in allowed_keys + classinst._additional_metadata\
                            + parent_metadata:
                        bad_specs.append([uri, c, 'Inputs', traitname, key])
                    # mandatory=False is redundant (absence means optional).
                    if key == 'mandatory' and trait.mandatory is not None and not trait.mandatory:
                        bad_specs.append([uri, c, 'Inputs', traitname, 'mandatory=False'])
            if not classinst.output_spec:
                continue
            if not os.path.exists(nonautotest):
                with open(testfile, 'at') as fp:
                    cmd = ['\ndef test_%s_outputs():' % c]
                    input_fields = ''
                    for traitname, trait in sorted(classinst.output_spec().traits(transient=None).items()):
                        input_fields += '%s=dict(' % traitname
                        for key, value in sorted(trait.__dict__.items()):
                            if key in in_built or key == 'desc':
                                continue
                            if isinstance(value, string_types):
                                quote = "'"
                                if "'" in value:
                                    quote = '"'
                                input_fields += "%s=%s%s%s,\n    " % (key, quote,
                                                                     value, quote)
                            else:
                                input_fields += "%s=%s,\n    " % (key, value)
                        input_fields += '),\n    '
                    cmd += ['    output_map = dict(%s)' % input_fields]
                    cmd += ['    outputs = %s.output_spec()' % c]
                    cmd += ["""
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value"""]
                    fp.writelines('\n'.join(cmd) + '\n')
            # Validate output-spec metadata keys as well.
            for traitname, trait in sorted(classinst.output_spec().traits(transient=None).items()):
                for key in sorted(trait.__dict__):
                    if key in in_built:
                        continue
                    parent_metadata = []
                    if 'parent' in trait.__dict__:
                        parent_metadata = list(getattr(trait, 'parent').__dict__.keys())
                    if key not in allowed_keys + classinst._additional_metadata\
                            + parent_metadata:
                        bad_specs.append([uri, c, 'Outputs', traitname, key])
        return bad_specs

    def _survives_exclude(self, matchstr, match_type):
        ''' Returns True if *matchstr* does not match patterns

        ``self.package_name`` removed from front of string if present

        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> dw._survives_exclude('sphinx.okpkg', 'package')
        True
        >>> dw.package_skip_patterns.append('^\\.badpkg$')
        >>> dw._survives_exclude('sphinx.badpkg', 'package')
        False
        >>> dw._survives_exclude('sphinx.badpkg', 'module')
        True
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        True
        >>> dw.module_skip_patterns.append('^\\.badmod$')
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        False
        '''
        if match_type == 'module':
            patterns = self.module_skip_patterns
        elif match_type == 'package':
            patterns = self.package_skip_patterns
        elif match_type == 'class':
            patterns = self.class_skip_patterns
        else:
            raise ValueError('Cannot interpret match type "%s"'
                             % match_type)
        # Match to URI without package name
        L = len(self.package_name)
        if matchstr[:L] == self.package_name:
            matchstr = matchstr[L:]
        for pat in patterns:
            try:
                # Accept both pre-compiled patterns and plain strings.
                pat.search
            except AttributeError:
                pat = re.compile(pat)
            if pat.search(matchstr):
                return False
        return True

    def discover_modules(self):
        ''' Return module sequence discovered from ``self.package_name``

        Parameters
        ----------
        None

        Returns
        -------
        mods : sequence
            Sequence of module names within ``self.package_name``

        Examples
        --------
        '''
        modules = [self.package_name]
        # raw directory parsing
        for dirpath, dirnames, filenames in os.walk(self.root_path):
            # Check directory names for packages
            root_uri = self._path2uri(os.path.join(self.root_path,
                                                   dirpath))
            for dirname in dirnames[:]:  # copy list - we modify inplace
                package_uri = '.'.join((root_uri, dirname))
                if (self._uri2path(package_uri) and
                        self._survives_exclude(package_uri, 'package')):
                    modules.append(package_uri)
                else:
                    # Pruning dirnames stops os.walk descending into it.
                    dirnames.remove(dirname)
            # Check filenames for modules
            for filename in filenames:
                # NOTE(review): assumes a '.py' suffix; non-.py files yield
                # bogus names that _uri2path then filters out.
                module_name = filename[:-3]
                module_uri = '.'.join((root_uri, module_name))
                if (self._uri2path(module_uri) and
                        self._survives_exclude(module_uri, 'module')):
                    modules.append(module_uri)
        return sorted(modules)

    def check_modules(self):
        # write the list
        # Run test_specs over every discovered module and print one line per
        # bad spec entry, colon-separated.
        modules = self.discover_modules()
        checked_modules = []
        for m in modules:
            bad_specs = self.test_specs(m)
            if bad_specs:
                checked_modules.extend(bad_specs)
        for bad_spec in checked_modules:
            print(':'.join(bad_spec))
if __name__ == "__main__":
    package = 'nipype'
    ic = InterfaceChecker(package)
    # Packages that should not be included in generated API docs.
    # Raw strings keep the regex backslashes intact: '\.' in a plain string
    # is an invalid escape sequence (DeprecationWarning on Python 3.6+).
    ic.package_skip_patterns += [r'\.external$',
                                 r'\.fixes$',
                                 r'\.utils$',
                                 r'\.pipeline',
                                 r'\.testing',
                                 r'\.caching',
                                 r'\.workflows',
                                 ]
    # The module/class exclusions below were disabled by wrapping them in a
    # bare triple-quoted string (a no-op expression). Kept as comments so the
    # intent stays visible without shipping a dead string literal.
    # ic.module_skip_patterns += [r'\.version$',
    #                             r'\.interfaces\.base$',
    #                             r'\.interfaces\.matlab$',
    #                             r'\.interfaces\.rest$',
    #                             r'\.interfaces\.pymvpa$',
    #                             r'\.interfaces\.slicer\.generate_classes$',
    #                             r'\.interfaces\.spm\.base$',
    #                             r'\.interfaces\.traits',
    #                             r'\.pipeline\.alloy$',
    #                             r'\.pipeline\.s3_node_wrapper$',
    #                             r'.\testing',
    #                             ]
    # ic.class_skip_patterns += ['AFNI', 'ANTS', 'FSL', 'FS', 'Info', '^SPM',
    #                            'Tester', 'Spec$', 'Numpy', 'NipypeTester']
    ic.check_modules()
| |
import numpy as np
import pandas as pd
import pyflux as pf
# Set up some data to use for the tests
# NOTE(review): no RNG seed is set, so fixtures differ between runs.
# AR(1)-style series with two exogenous regressors.
noise = np.random.normal(0,1,400)
y = np.zeros(400)
x1 = np.random.normal(0,1,400)
x2 = np.random.normal(0,1,400)
for i in range(1,len(y)):
    y[i] = 0.9*y[i-1] + noise[i] + 0.1*x1[i] - 0.3*x2[i]
data = pd.DataFrame([y,x1,x2]).T
data.columns = ['y', 'x1', 'x2']
# Count data for the Poisson-family variants.
countdata = np.random.poisson(3,300)
x1 = np.random.normal(0,1,300)
x2 = np.random.normal(0,1,300)
data2 = pd.DataFrame([countdata,x1,x2]).T
data2.columns = ['y', 'x1', 'x2']
# Out-of-sample frames used by the predict(h=..., oos_data=...) tests.
y_oos = np.random.normal(0,1,30)
x1_oos = np.random.normal(0,1,30)
x2_oos = np.random.normal(0,1,30)
countdata_oos = np.random.poisson(3,30)
data_oos = pd.DataFrame([y_oos,x1_oos,x2_oos]).T
data_oos.columns = ['y', 'x1', 'x2']
data2_oos = pd.DataFrame([countdata_oos,x1_oos,x2_oos]).T
data2_oos.columns = ['y', 'x1', 'x2']
# Pre-fitted GASX models shared by the tests: single regressor...
model_1 = pf.GASX(formula="y ~ x1", data=data, ar=0, sc=0, family=pf.Normal())
x_1 = model_1.fit()
model_2 = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
x_2 = model_2.fit()
model_3 = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, integ=1, family=pf.Normal())
x_3 = model_3.fit()
model_4 = pf.GASX(formula="y ~ x1", data=data, ar=2, sc=2, family=pf.Normal())
x_4 = model_4.fit()
# ...and two regressors.  NOTE(review): the x_1..x_4 results are rebound
# here, discarding the single-regressor fit results above.
model_b_1 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=0, sc=0, family=pf.Normal())
x_1 = model_b_1.fit()
model_b_2 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
x_2 = model_b_2.fit()
model_b_3 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, integ=1, family=pf.Normal())
x_3 = model_b_3.fit()
model_b_4 = pf.GASX(formula="y ~ x1 + x2", data=data, ar=2, sc=2, family=pf.Normal())
x_4 = model_b_4.fit()
def test_no_terms():
    """GASX with no AR or SC terms has 3 latent variables, none NaN."""
    z_list = model_1.latent_variables.z_list
    assert len(z_list) == 3
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_couple_terms():
    """GASX with 1 AR and 1 SC term has 5 latent variables, none NaN."""
    z_list = model_2.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_couple_terms_integ():
    """Integrated GASX(1,1) has 5 latent variables, none NaN."""
    z_list = model_3.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_bbvi():
    """BBVI-fitted GASX(1,1): 5 latent variables, all finite."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    gasx.fit('BBVI', iterations=100)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_bbvi_mini_batch():
    """Mini-batch BBVI fit: 5 latent variables, all finite."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    gasx.fit('BBVI', iterations=500, mini_batch=32)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_bbvi_elbo():
    """The recorded ELBO should increase over BBVI iterations."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    result = gasx.fit('BBVI', iterations=500, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test_bbvi_mini_batch_elbo():
    """The recorded ELBO should increase under mini-batch BBVI too."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    result = gasx.fit('BBVI', iterations=500, mini_batch=32, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test_mh():
    """Metropolis-Hastings fit: 5 latent variables, all finite."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    gasx.fit('M-H', nsims=300)
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_laplace():
    """Laplace-approximation fit: 5 latent variables, all finite."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    gasx.fit('Laplace')
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_pml():
    """PML fit: 5 latent variables, all finite."""
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    gasx.fit('PML')
    z_list = gasx.latent_variables.z_list
    assert len(z_list) == 5
    values = np.array([z.value for z in z_list])
    assert not np.isnan(values).any()
def test_predict_length():
    """predict(h=5) returns exactly five forecast rows."""
    forecast = model_4.predict(h=5, oos_data=data_oos)
    assert forecast.shape[0] == 5
def test_predict_is_length():
    """predict_is(h=5) returns exactly five in-sample forecast rows."""
    forecast = model_4.predict_is(h=5)
    assert forecast.shape[0] == 5
def test_predict_nans():
    """
    Tests that the predictions are not NaNs

    Calls predict once and reuses the result; the original called it twice,
    which was wasteful and (absent a fixed seed) risked comparing two
    different forecasts.
    """
    predictions = model_4.predict(h=5, oos_data=data_oos)
    assert(len(predictions.values[np.isnan(predictions.values)]) == 0)
def test_predict_is_nans():
    """
    Tests that the predictions in-sample are not NaNs

    Calls predict_is once and reuses the result instead of recomputing the
    in-sample forecast inside the assertion.
    """
    predictions = model_4.predict_is(h=5)
    assert(len(predictions.values[np.isnan(predictions.values)]) == 0)
def test_predict_nonconstant():
    """Forecasts must not all be identical.

    A constant forecast would indicate the predict function failing to
    iterate forward.
    """
    gasx = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    gasx.fit()
    forecast = gasx.predict(h=10, oos_data=data_oos, intervals=False)
    assert not np.all(forecast.values == forecast.values[0])
def test_predict_is_nonconstant():
    """In-sample rolling forecasts must not all be identical.

    A constant forecast would indicate the predict function failing to
    iterate forward.
    """
    forecast = model_2.predict_is(h=10, intervals=False)
    assert not np.all(forecast.values == forecast.values[0])
def test_predict_intervals():
    """
    Tests prediction intervals are ordered correctly

    Bug fix: the original referenced an undefined global `model` in three
    assertions (NameError at runtime); the forecast comes from model_1, so
    model_1.data_name is the correct point-forecast column.
    """
    predictions = model_1.predict(h=10, oos_data=data_oos, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_1.data_name].values))
    assert(np.all(predictions[model_1.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals():
    """
    Tests prediction intervals are ordered correctly

    Bug fix: the original referenced an undefined global `model` in three
    assertions (NameError at runtime); the forecast comes from model_1, so
    model_1.data_name is the correct point-forecast column.
    """
    predictions = model_1.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_1.data_name].values))
    assert(np.all(predictions[model_1.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_bbvi():
    """Prediction intervals from a BBVI fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)

def test_predict_is_intervals_bbvi():
    """In-sample prediction intervals from a BBVI fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    preds = model.predict_is(h=10, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)
def test_predict_intervals_mh():
    """Prediction intervals from an M-H fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('M-H', nsims=400)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)

def test_predict_is_intervals_mh():
    """In-sample prediction intervals from an M-H fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('M-H', nsims=400)
    preds = model.predict_is(h=10, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)
def test_sample_model():
    """Posterior samples have shape (nsims, len(data) - 1)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    sample = model.sample(nsims=100)
    assert sample.shape == (100, len(data) - 1)
def test_ppc():
    """Posterior predictive check returns a p-value in [0, 1]."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    p_value = model.ppc()
    assert 0.0 <= p_value <= 1.0
## Models with more than one predictor

def test2_no_terms():
    """
    A GASX model with no AR/SC terms and two predictors has 4 latent
    variables, none of which is nan after estimation.
    """
    assert len(model_b_1.latent_variables.z_list) == 4
    values = np.array([lv.value for lv in model_b_1.latent_variables.z_list])
    assert not np.isnan(values).any()
def test2_couple_terms():
    """
    A GASX model with 1 AR and 1 SC term and two predictors has 6 latent
    variables, none of which is nan after estimation.
    """
    assert len(model_b_2.latent_variables.z_list) == 6
    values = np.array([lv.value for lv in model_b_2.latent_variables.z_list])
    assert not np.isnan(values).any()
def test2_bbvi():
    """
    BBVI estimation with multiple predictors yields 6 latent variables,
    none of which is nan.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=500)
    assert len(model.latent_variables.z_list) == 6
    values = np.array([lv.value for lv in model.latent_variables.z_list])
    assert not np.isnan(values).any()

def test2_bbvi_mini_batch():
    """
    Mini-batch BBVI estimation with multiple predictors yields 6 latent
    variables, none of which is nan.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=500, mini_batch=32)
    assert len(model.latent_variables.z_list) == 6
    values = np.array([lv.value for lv in model.latent_variables.z_list])
    assert not np.isnan(values).any()
def test2_bbvi_elbo():
    """The recorded ELBO should increase over BBVI iterations."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    result = model.fit('BBVI', iterations=500, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]

def test2_bbvi_mini_batch_elbo():
    """The recorded ELBO should increase over mini-batch BBVI iterations."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    result = model.fit('BBVI', iterations=500, mini_batch=32, record_elbo=True, map_start=False)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test2_mh():
    """
    Metropolis-Hastings estimation with multiple predictors yields 6 latent
    variables, none of which is nan.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('M-H', nsims=300)
    assert len(model.latent_variables.z_list) == 6
    values = np.array([lv.value for lv in model.latent_variables.z_list])
    assert not np.isnan(values).any()

def test2_laplace():
    """
    Laplace estimation with multiple predictors yields 6 latent variables,
    none of which is nan.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('Laplace')
    assert len(model.latent_variables.z_list) == 6
    values = np.array([lv.value for lv in model.latent_variables.z_list])
    assert not np.isnan(values).any()

def test2_pml():
    """
    PML estimation with multiple predictors yields 6 latent variables,
    none of which is nan.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('PML')
    assert len(model.latent_variables.z_list) == 6
    values = np.array([lv.value for lv in model.latent_variables.z_list])
    assert not np.isnan(values).any()
def test2_predict_length():
    """The predict dataframe has exactly h rows."""
    assert model_b_2.predict(h=5, oos_data=data_oos).shape[0] == 5

def test2_predict_is_length():
    """The in-sample predict dataframe has exactly h rows."""
    assert model_b_2.predict_is(h=5).shape[0] == 5
def test2_predict_nans():
    """
    Tests that the predictions are not NaNs.

    Fix: the original called predict() twice per assertion, repeating the
    whole forecasting pass; compute the forecast once and test it directly.
    """
    preds = model_b_2.predict(h=5, oos_data=data_oos)
    assert not np.isnan(preds.values).any()

def test2_predict_is_nans():
    """
    Tests that the in-sample predictions are not NaNs.

    Fix: the original called predict_is() twice per assertion, repeating the
    whole forecasting pass; compute the forecast once and test it directly.
    """
    preds = model_b_2.predict_is(h=5)
    assert not np.isnan(preds.values).any()
def test2_predict_nonconstant():
    """
    Forecasts over the horizon should vary; a perfectly flat forecast
    usually means the predict loop failed to iterate forward.
    """
    preds = model_b_2.predict(h=10, oos_data=data_oos, intervals=False)
    assert not np.all(preds.values == preds.values[0])

def test2_predict_is_nonconstant():
    """
    In-sample forecasts should vary; a perfectly flat forecast usually
    means the predict loop failed to iterate forward.
    """
    preds = model_b_2.predict_is(h=10, intervals=False)
    assert not np.all(preds.values == preds.values[0])
def test2_predict_intervals():
    """
    Tests prediction intervals are ordered correctly.

    Bug fix: the point-forecast column was looked up via the stale module
    global ``model``; these predictions come from ``model_b_2``, so use
    ``model_b_2.data_name``.
    """
    predictions = model_b_2.predict(h=10, oos_data=data_oos, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_b_2.data_name].values))
    assert(np.all(predictions[model_b_2.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))

def test2_predict_is_intervals():
    """
    Tests in-sample prediction intervals are ordered correctly.

    Bug fix: the point-forecast column was looked up via the stale module
    global ``model``; these predictions come from ``model_b_2``, so use
    ``model_b_2.data_name``.
    """
    predictions = model_b_2.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model_b_2.data_name].values))
    assert(np.all(predictions[model_b_2.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_bbvi():
    """Prediction intervals from a BBVI fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)

def test2_predict_is_intervals_bbvi():
    """In-sample prediction intervals from a BBVI fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    preds = model.predict_is(h=10, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)

def test2_predict_intervals_mh():
    """Prediction intervals from an M-H fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('M-H', nsims=400)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)

def test2_predict_is_intervals_mh():
    """In-sample prediction intervals from an M-H fit are strictly ordered."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('M-H', nsims=400)
    preds = model.predict_is(h=10, intervals=True)
    upper99 = preds['99% Prediction Interval'].values
    upper95 = preds['95% Prediction Interval'].values
    point = preds[model.data_name].values
    lower5 = preds['5% Prediction Interval'].values
    lower1 = preds['1% Prediction Interval'].values
    assert np.all(upper99 > upper95)
    assert np.all(upper95 > point)
    assert np.all(point > lower5)
    assert np.all(lower5 > lower1)
def test2_sample_model():
    """Posterior samples have shape (nsims, len(data) - 1)."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    sample = model.sample(nsims=100)
    assert sample.shape == (100, len(data) - 1)

def test2_ppc():
    """Posterior predictive check returns a p-value in [0, 1]."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Normal())
    model.fit('BBVI', iterations=100)
    p_value = model.ppc()
    assert 0.0 <= p_value <= 1.0
| |
"""The tests for the integration sensor platform."""
from homeassistant.components.compensation.const import CONF_PRECISION, DOMAIN
from homeassistant.components.compensation.sensor import ATTR_COEFFICIENTS
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
EVENT_HOMEASSISTANT_START,
EVENT_STATE_CHANGED,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
async def test_linear_state(hass):
    """Test compensation sensor state."""
    # Two data points on y = x + 1, so the fitted line reproduces it exactly.
    config = {
        "compensation": {
            "test": {
                "source": "sensor.uncompensated",
                "data_points": [
                    [1.0, 2.0],
                    [2.0, 3.0],
                ],
                "precision": 2,
                "unit_of_measurement": "a",
            }
        }
    }
    expected_entity_id = "sensor.compensation_sensor_uncompensated"
    assert await async_setup_component(hass, DOMAIN, config)
    assert await async_setup_component(hass, SENSOR_DOMAIN, config)
    await hass.async_block_till_done()
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    entity_id = config[DOMAIN]["test"]["source"]
    # Source reads 4 -> compensated value should be 4 + 1 = 5.
    hass.states.async_set(entity_id, 4, {})
    await hass.async_block_till_done()
    state = hass.states.get(expected_entity_id)
    assert state is not None
    assert round(float(state.state), config[DOMAIN]["test"][CONF_PRECISION]) == 5.0
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == "a"
    # A line through (1,2) and (2,3) has coefficients [1.0, 1.0].
    coefs = [round(v, 1) for v in state.attributes.get(ATTR_COEFFICIENTS)]
    assert coefs == [1.0, 1.0]
    # A non-numeric source value must push the sensor to unknown.
    hass.states.async_set(entity_id, "foo", {})
    await hass.async_block_till_done()
    state = hass.states.get(expected_entity_id)
    assert state is not None
    assert state.state == STATE_UNKNOWN
async def test_linear_state_from_attribute(hass):
    """Test compensation sensor state that pulls from attribute."""
    config = {
        "compensation": {
            "test": {
                "source": "sensor.uncompensated",
                "attribute": "value",
                "data_points": [
                    [1.0, 2.0],
                    [2.0, 3.0],
                ],
                "precision": 2,
            }
        }
    }
    # Attribute-based sensors get the attribute name appended to the id.
    expected_entity_id = "sensor.compensation_sensor_uncompensated_value"
    assert await async_setup_component(hass, DOMAIN, config)
    assert await async_setup_component(hass, SENSOR_DOMAIN, config)
    await hass.async_block_till_done()
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    entity_id = config[DOMAIN]["test"]["source"]
    # The state itself (3) must be ignored; only the "value" attribute (4)
    # feeds the compensation, so the result is 4 + 1 = 5.
    hass.states.async_set(entity_id, 3, {"value": 4})
    await hass.async_block_till_done()
    state = hass.states.get(expected_entity_id)
    assert state is not None
    assert round(float(state.state), config[DOMAIN]["test"][CONF_PRECISION]) == 5.0
    coefs = [round(v, 1) for v in state.attributes.get(ATTR_COEFFICIENTS)]
    assert coefs == [1.0, 1.0]
    # A non-numeric attribute value must push the sensor to unknown.
    hass.states.async_set(entity_id, 3, {"value": "bar"})
    await hass.async_block_till_done()
    state = hass.states.get(expected_entity_id)
    assert state is not None
    assert state.state == STATE_UNKNOWN
async def test_quadratic_state(hass):
    """Test a 2nd-degree polynomial compensation sensor (degree: 2 below)."""
    config = {
        "compensation": {
            "test": {
                "source": "sensor.temperature",
                # Noisy calibration samples: several readings per set point.
                "data_points": [
                    [50, 3.3],
                    [50, 2.8],
                    [50, 2.9],
                    [70, 2.3],
                    [70, 2.6],
                    [70, 2.1],
                    [80, 2.5],
                    [80, 2.9],
                    [80, 2.4],
                    [90, 3.0],
                    [90, 3.1],
                    [90, 2.8],
                    [100, 3.3],
                    [100, 3.5],
                    [100, 3.0],
                ],
                "degree": 2,
                "precision": 3,
            }
        }
    }
    assert await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    entity_id = config[DOMAIN]["test"]["source"]
    # 43.2 is below the calibrated range; the fitted quadratic extrapolates.
    hass.states.async_set(entity_id, 43.2, {})
    await hass.async_block_till_done()
    state = hass.states.get("sensor.compensation_sensor_temperature")
    assert state is not None
    # Expected value from the least-squares fit of the samples above.
    assert round(float(state.state), config[DOMAIN]["test"][CONF_PRECISION]) == 3.327
async def test_numpy_errors(hass, caplog):
    """Tests bad polyfits."""
    config = {
        "compensation": {
            "test": {
                "source": "sensor.uncompensated",
                # Two identical points make the fit degenerate and trigger a
                # numpy RankWarning/divide warning that is logged.
                "data_points": [
                    [0.0, 1.0],
                    [0.0, 1.0],
                ],
            },
        }
    }
    await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # NOTE(review): the exact warning text is numpy-version dependent
    # ("true_divide" was renamed "divide" in newer numpy) -- confirm the
    # pinned numpy version before relying on this string.
    assert "invalid value encountered in true_divide" in caplog.text
async def test_datapoints_greater_than_degree(hass, caplog):
    """Tests 3 bad data points."""
    config = {
        "compensation": {
            "test": {
                "source": "sensor.uncompensated",
                # degree 2 needs at least 3 data points; only 2 are given,
                # so setup must log a validation error.
                "data_points": [
                    [1.0, 2.0],
                    [2.0, 3.0],
                ],
                "degree": 2,
            },
        }
    }
    await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert "data_points must have at least 3 data_points" in caplog.text
async def test_new_state_is_none(hass):
    """Tests catch for empty new states."""
    config = {
        "compensation": {
            "test": {
                "source": "sensor.uncompensated",
                "data_points": [
                    [1.0, 2.0],
                    [2.0, 3.0],
                ],
                "precision": 2,
                "unit_of_measurement": "a",
            }
        }
    }
    expected_entity_id = "sensor.compensation_sensor_uncompensated"
    await async_setup_component(hass, DOMAIN, config)
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    last_changed = hass.states.get(expected_entity_id).last_changed
    # Fire a state_changed event with no new_state payload; the sensor must
    # ignore it and keep its previous state (last_changed unchanged).
    hass.bus.async_fire(
        EVENT_STATE_CHANGED, event_data={"entity_id": "sensor.uncompensated"}
    )
    assert last_changed == hass.states.get(expected_entity_id).last_changed
| |
from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.
    """
    parts = lookup_path.split('__')
    # Drop a trailing query term (e.g. 'exact', 'icontains') if present.
    if parts[-1] in QUERY_TERMS:
        parts = parts[:-1]
    # Walk the relations; a many-to-many hop anywhere in the path can yield
    # duplicate rows, so the queryset must be made distinct.
    for part in parts:
        field = opts.get_field(part)
        if hasattr(field, 'get_path_info'):
            path_info = field.get_path_info()
            opts = path_info[-1].to_opts
            if any(step.m2m for step in path_info):
                return True
    return False
def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    # '__in' lookups take a comma-separated list of values.
    if key.endswith('__in'):
        value = value.split(',')
    # '__isnull' maps '', 'false' and '0' to False; anything else to True.
    if key.endswith('__isnull'):
        value = value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    # Non-string primary keys (e.g. integers) pass through untouched.
    if not isinstance(s, six.string_types):
        return s
    problematic = """:/_#?;@&=+$,"[]<>%\n\\"""
    return ''.join(
        '_%02X' % ord(ch) if ch in problematic else ch for ch in s)
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().

    Fixes: the original shadowed the builtin ``list`` with a local variable
    and aliased ``chr``/``int`` as ``mychr``/``myatoi`` for no benefit; both
    obscured the logic without changing behavior.
    """
    parts = s.split('_')
    res = [parts[0]]
    for item in parts[1:]:
        # A valid escape is '_' followed by two hex digits; anything else is
        # restored verbatim with its leading underscore.
        if item[1:2]:
            try:
                res.append(chr(int(item[:2], 16)) + item[2:])
            except ValueError:
                res.append('_' + item)
        else:
            res.append('_' + item)
    return "".join(res)
def flatten(fields):
    """Returns a list which is a single level of flattening of the
    original list."""
    # Lists/tuples are spliced in; every other element is kept whole.
    return [item
            for entry in fields
            for item in (entry if isinstance(entry, (list, tuple)) else (entry,))]
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    field_names = []
    for _name, opts in fieldsets:
        # Each fieldset's 'fields' may contain grouped (tuple) entries.
        field_names += flatten(opts['fields'])
    return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
    Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    perms_needed = set()
    def format_callback(obj):
        # Render one collected object; as a side effect, records in
        # ``perms_needed`` any verbose_name the user lacks delete rights for.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))
        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link
            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link
    to_delete = collector.nested(format_callback)
    # collector.protected holds objects whose deletion is blocked by
    # on_delete=PROTECT; they are reported separately.
    protected = [format_callback(obj) for obj in collector.protected]
    return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
    """Deletion collector that also records parent/child edges between the
    collected objects so the cascade can be rendered as a nested list."""
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {}  # {from_instance: [to_instances]}
        self.protected = set()
        # Count of collected objects keyed by verbose_name_plural.
        self.model_count = defaultdict(int)
    def add_edge(self, source, target):
        # Record that deleting ``source`` cascades to ``target``; a source of
        # None marks a root object.
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_count[obj._meta.verbose_name_plural] += 1
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # Remember protected objects instead of aborting; callers report
            # them separately.
            self.protected.update(e.protected_objects)
    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        # Prefetch the relation the display callbacks will traverse.
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first render of ``obj`` and its children; ``seen`` guards
        # against revisiting shared/cyclic objects.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False
def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    """
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        opts = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        opts = obj.model._meta
    else:
        # Assume a Meta/Options-like object was passed directly.
        opts = obj
    singular = force_text(opts.verbose_name)
    plural = force_text(opts.verbose_name_plural)
    return {'verbose_name': singular, 'verbose_name_plural': plural}
def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    names = model_format_dict(obj)
    return ungettext(names["verbose_name"], names["verbose_name_plural"], n or 0)
def lookup_field(name, obj, model_admin=None):
    """Resolve ``name`` against ``obj`` (and optionally ``model_admin``).

    Returns a ``(field, attr, value)`` triple: ``field`` is the model field
    (None for non-field lookups), ``attr`` is the callable/attribute used
    instead (None for field lookups), and ``value`` is the resolved value.
    """
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            # Fall back to an attribute of the model instance itself.
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def _get_non_gfk_field(opts, name):
    """
    For historical reasons, the admin app relies on GenericForeignKeys as being
    "not found" by get_field(). This could likely be cleaned up.
    Reverse relations should also be excluded as these aren't attributes of the
    model (rather something like `foo_set`).
    """
    field = opts.get_field(name)
    # A generic FK shows up as a many-to-one relation without a related model.
    is_gfk = field.many_to_one and not field.related_model
    if field.is_relation and (is_gfk or field.one_to_many):
        raise FieldDoesNotExist()
    return field
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.related_model._meta.verbose_name
    except FieldDoesNotExist:
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            # Not a model field: resolve a callable, a model_admin attribute
            # or a model attribute, in that order.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)
            # An explicit short_description wins over any derived label.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                  hasattr(attr, "fget") and
                  hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
def help_text_for_field(name, model):
    """Return the help_text of field ``name`` on ``model``, or "" if the
    name is not a (non-GFK) model field or has no help text."""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except FieldDoesNotExist:
        help_text = ""
    else:
        help_text = getattr(field, 'help_text', "")
    return smart_text(help_text)
def display_for_field(value, field, empty_value_display):
    """Format ``value`` for changelist display based on its model ``field``."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    # Choices take precedence: show the human-readable label.
    if field.flatchoices:
        return dict(field.flatchoices).get(value, empty_value_display)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return empty_value_display
    elif isinstance(field, models.DateTimeField):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, (models.IntegerField, models.FloatField)):
        return formats.number_format(value)
    elif isinstance(field, models.FileField) and value:
        return format_html('<a href="{}">{}</a>', value.url, value)
    else:
        return smart_text(value)
def display_for_value(value, empty_value_display, boolean=False):
    """Format a non-field ``value`` for changelist display; with
    ``boolean=True`` render it as a boolean icon."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    if boolean:
        return _boolean_icon(value)
    if value is None:
        return empty_value_display
    if isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    if isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    if isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    return smart_text(value)
class NotRelationField(Exception):
    """Raised by get_model_from_relation() when a field is not a relation."""
    pass
def get_model_from_relation(field):
    """Return the model at the far end of ``field``, or raise
    NotRelationField if the field is not a relation."""
    if not hasattr(field, 'get_path_info'):
        raise NotRelationField
    return field.get_path_info()[-1].to_opts.model
def reverse_field_path(model, path):
    """ Create a reversed field path.
    E.g. Given (Order, "user__groups"),
    return (Group, "user__order").
    Final field must be a related model, not a data field.
    """
    reversed_path = []
    parent = model
    pieces = path.split(LOOKUP_SEP)
    for piece in pieces:
        field = parent._meta.get_field(piece)
        # skip trailing data field if extant:
        if len(reversed_path) == len(pieces) - 1: # final iteration
            try:
                get_model_from_relation(field)
            except NotRelationField:
                break
        # Field should point to another model
        if field.is_relation and not (field.auto_created and not field.concrete):
            # Forward relation: reverse it via the related_query_name.
            related_name = field.related_query_name()
            parent = field.remote_field.model
        else:
            # Reverse relation: reverse it via the originating field's name.
            related_name = field.field.name
            parent = field.related_model
        # Build the reversed path front-to-back.
        reversed_path.insert(0, related_name)
    return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
    """ Return list of Fields given path relative to model.
    e.g. (ModelX, "user__groups__name") -> [
        <django.db.models.fields.related.ForeignKey object at 0x...>,
        <django.db.models.fields.related.ManyToManyField object at 0x...>,
        <django.db.models.fields.CharField object at 0x...>,
    ]
    """
    fields = []
    for piece in path.split(LOOKUP_SEP):
        # After the first hop, resolve pieces against the previous field's
        # related model.
        parent = model if not fields else get_model_from_relation(fields[-1])
        fields.append(parent._meta.get_field(piece))
    return fields
def remove_trailing_data_field(fields):
    """ Discard trailing non-relation field if extant. """
    try:
        get_model_from_relation(fields[-1])
    except NotRelationField:
        return fields[:-1]
    return fields
| |
#!/usr/bin/env python
# -*- encoding: utf-8
"""
CloudFS API port to python
"""
from collections import namedtuple, OrderedDict
import os
import stat
import requests
from requests.auth import HTTPBasicAuth
import json
import enum
import io
from urllib2 import unquote, quote
from math import ceil
from threading import Thread
from time import sleep
from datetime import datetime
import copy
import sys
import hashlib
import codecs
#define BUFFER_INITIAL_SIZE 4096
#define MAX_HEADER_SIZE 8192
#define MAX_PATH_SIZE (1024 + 256 + 3)
#define MAX_URL_SIZE (MAX_PATH_SIZE * 3)
#define USER_AGENT "CloudFuse"
#define OPTION_SIZE 1024
# Plain record types mirroring the structs of the original cloudfuse C code.
# dir_entry: one listing entry; 'next' suggests a C linked-list carry-over.
dir_entry = namedtuple("dir_entry", "name full_name content_type size last_modified isdir islink next")
# segment_info: bookkeeping for one segment of a large-object upload.
segment_info = namedtuple("segment_info", "fh part size segment_size seg_base method")
# options: runtime configuration (cache, SSL, segmenting, OAuth credentials).
options = namedtuple("options", "cache_timeout verify_ssl segment_size segment_above storage_url container temp_dir client_id client_secret refresh_token")
def dmerge(a, b):
    """Recursively merge mapping *b* into mapping *a*, in place.

    Keys whose values are dicts on BOTH sides are merged key by key; any
    other value from *b* overwrites the corresponding entry in *a*.
    """
    # .items() works on both Python 2 and 3 (the original used the
    # Python-2-only .iteritems()).
    for k, v in b.items():
        # Recurse only when both sides hold dicts.  The original recursed
        # whenever the key merely existed in *a*, which raised TypeError
        # when a dict in *b* collided with a plain value in *a*.
        if isinstance(v, dict) and isinstance(a.get(k), dict):
            dmerge(a[k], v)
        else:
            a[k] = v
class File(OrderedDict):
    """Ordered mapping of object metadata, tagged with a short file name.

    The short name comes from the 'fname' keyword when supplied and falls
    back to the mandatory 'name' metadata entry otherwise.
    """

    def __init__(self, *args, **kwargs):
        short_name = kwargs.pop('fname', None)
        if short_name is None:
            # No explicit short name: reuse the object's full name.
            short_name = kwargs['name']
        self.fname = short_name
        OrderedDict.__init__(self, *args, **kwargs)
class Directory(OrderedDict):
    """Ordered mapping of child entries, tagged with the directory's name."""

    def __init__(self, dirname, *args, **kwargs):
        self.dirname = dirname
        super(Directory, self).__init__(*args, **kwargs)
class CloudFileReader(object):
    # Read-ahead window size: each ranged GET fetches up to 2 MiB.
    MAX_CHUNK_SIZE = 1024 * 1024 * 2
    """
    Totally thread unsafe IO-like interface with a read buffer and some extra functionalities
    """
    def __init__(self, url, cfsobj):
        # url: object path relative to the container.
        # cfsobj: owning CloudFS instance, used for authenticated requests.
        self.url = url
        self.cfsobj = cfsobj
        self.reset()

    @property
    def current_data(self):
        # Unconsumed remainder of the buffered window.
        # NOTE(review): the -1 end index drops the final byte of the window;
        # presumably this should be self._datawindow[self._lhead:] -- confirm.
        return self._datawindow[self._lhead:-1]

    @property
    def feof(self):
        # True once the absolute fetch head has reached the object size.
        return self._head == self._size

    def reset(self):
        # Rewind to the start of the object and refresh its metadata.
        self._head = 0              # absolute offset of the next remote fetch
        self._readable = True
        self._datawindow = bytes()  # currently buffered chunk
        self._lhead = 0             # local read position inside _datawindow
        self._feof = False
        self.closed = False
        self._getinfo()

    def stat(self):
        # Minimal stat(2)-like dict; mode/uid/gid come from the CloudFS object,
        # mtime from the object's x-timestamp header.
        return dict(st_mode=self.cfsobj._mode, st_mtime=self._mtime,
                    st_uid=self.cfsobj._uid, st_gid=self.cfsobj._gid)

    def _getinfo(self):
        # HEAD the object to learn its size and mtime; on failure the stream
        # is marked unreadable instead of raising.
        data = self.cfsobj._send_request('HEAD', self.url)
        if data.status_code >= 200 and data.status_code < 400:
            self._size = int(data.headers['content-length'])
            # NOTE(review): both branches of this ternary yield 1, so the
            # 'accept-ranges' check is currently a no-op -- confirm intent.
            self._seekunit = 1 if 'bytes' in data.headers['accept-ranges'] else 1
            self._mtime = datetime.fromtimestamp(float(data.headers['x-timestamp']))
        else:
            self._readable = False

    def _getchunk(self, size):
        # Fetch the next window of up to *size* units with an HTTP Range GET.
        # Returns True on success, False at EOF or on a request error.
        if self._feof:
            return False
        first = self._head
        last = min(self._size, first + size * self._seekunit)
        h = {'Range': 'bytes={}-{}'.format(
            first,
            last
        )}
        data = self.cfsobj._send_request('GET', self.url, extra_headers = h)
        if data.status_code >= 200 and data.status_code < 400:
            if last == self._size:
                self._feof = True
            self._datawindow = bytes(data.content)
            self._lhead = 0
            self._head += last - first
            return True
        else:
            self._feof = True
            print("Error", data.status_code)
            return False

    def read_generator(self, size=-1):
        # Yield successive slices of the object; size == -1 streams the whole
        # object in MAX_CHUNK_SIZE pieces.
        if self.closed: raise IOError("reading closed file")
        if size == -1:
            data_to_read = self._size
            size = self.MAX_CHUNK_SIZE
        else:
            data_to_read = size
        while data_to_read >= 0 and self._readable:
            if self._lhead + size > len(self._datawindow):
                # Not enough buffered data: remember the tail of the old
                # window, then try to pull a fresh one.
                oldchunk = self._datawindow
                oldhead = self._lhead
                if not self._getchunk(self.MAX_CHUNK_SIZE):
                    self._readable = False
                    # NOTE(review): [oldhead:-1] drops the last buffered byte
                    # here and below -- confirm whether [oldhead:] was meant.
                    yield oldchunk[oldhead:-1]
                    continue
                else:
                    # Stitch the old tail together with the head of the new
                    # window to honour the requested slice size.
                    left = size - len(oldchunk) + oldhead
                    self._lhead = left
                    data_to_read -= size
                    yield oldchunk[oldhead:-1] + self._datawindow[0:left]
            else:
                # Entire slice is already buffered.
                self._lhead += size
                data_to_read -= size
                yield self._datawindow[self._lhead-size:self._lhead]

    def read(self, size=-1):
        # Blocking read built on top of read_generator.
        if self.closed: raise IOError("reading closed file")
        return ''.join(a for a in self.read_generator(size))

    def read1(self, size=-1):
        # io.BufferedIOBase-style alias for read().
        return self.read(size)

    def seekable(self):
        return False

    def readable(self):
        return self._readable

    def writable(self):
        return False

    def tell(self):
        # NOTE(review): mixes the absolute fetch offset with the local window
        # offset; only accurate right after a full-window read -- verify.
        return self._head + self._lhead

    def close(self):
        # Only flips a flag; there is no underlying handle to release.
        self.closed = True

    def readline_generator(self, size=-1):
        # Yield at most one line (up to the next '\n' in the buffered window).
        if self.closed: raise IOError("reading closed file")
        if self.feof: yield ""
        else:
            lfpos = self.current_data.find('\n')
            if lfpos > 0:
                # advance head anyway
                self._lhead += lfpos
                if size > -1:
                    #reduce head if size given: yield incomplete line
                    lfpos = min(lfpos, size)
                yield self._datawindow[self._lhead - lfpos:self._lhead]

    def readline(self, size=-1):
        if self.closed: raise IOError("reading closed file")
        if self.feof: return ""
        return next(self.readline_generator(size))

    def readlines_generator(self, hint=-1):
        # Yield lines until EOF, or until roughly *hint* bytes were produced.
        if self.closed: raise IOError("reading closed file")
        br = 0
        while not self.feof:
            d = self.readline(-1)
            br += len(d)
            yield d
            if hint > 0 and br > hint:
                break

    def readlines(self, hint=-1):
        if self.closed: raise IOError("reading closed file")
        return [a for a in self.readlines_generator(hint)]

    def readinto(self, b):
        # Fill the bytearray *b* one byte at a time; returns the count read,
        # or None when *b* is not a bytearray.
        if self.closed: raise IOError("reading closed file")
        if not isinstance(b, bytearray):
            return None
        maxreads = len(b)
        reads = 0
        while reads < maxreads and not self.feof:
            # NOTE(review): read(1) returns a str; assigning a str into a
            # bytearray slot only works on Python 2 -- confirm.
            b[reads] = self.read(1)
            reads += 1
        return reads

    def next(self):
        # Python 2 iterator protocol: one byte per iteration.
        if self.feof: raise StopIteration
        return self.read(1)

    def getvalue(self):
        # Rewind and return the entire object contents.
        if self.closed: raise IOError("reading closed file")
        self.reset()
        return self.read(-1)
class CloudFS(object):
    """
    Implements CloudFS logic in python. Some logic is deported to specific objects
    - Files are handled by CloudFileReader which implements an IO-like interface
    to files
    """
    # we split files bigger than 10 MB into chunks
    # and use a manifest file
    MAX_FILE_CHUNK_SIZE = 1024 * 1024 * 10
    CHUNKS_FOLDER = 'system/chunks'
    MAX_UPLOAD_THREADS = 6

    @property
    def default_container(self):
        # Lazily discover the first container of the account.  The attribute
        # is pre-set to '' before the request because _send_request itself
        # reads this property -- the placeholder breaks the recursion.
        if self._default_container is None:
            self._default_container = ''
            dc = self._send_request('GET', '').content.replace('\n', '')
            self._default_container = dc
        return self._default_container

    def _header_dispatch(self, headers):
        # Harvest auth/quota information from any response's headers.
        self._last_headers = headers
        # requests takes care of case sensitivity
        if 'x-auth-token' in headers:
            self.storage_token = headers['x-auth-token']
        if 'x-storage-url' in headers:
            self.storage_url = headers['x-storage-url']
        if 'x-account-meta-quota' in headers:
            self.block_quota = int(headers['x-account-meta-quota'])
        if 'x-account-bytes-used' in headers:
            self.free_blocks = self.block_quota - int(headers['x-account-bytes-used'])
        if 'x-account-object-count' in headers:
            pass

    def _send_request(self, method, path, extra_headers = [], params = None, payload = None):
        # Central HTTP helper.  *method* is an HTTP verb or one of the pseudo
        # verbs MKDIR/MKLINK.  Retries up to 3 times, re-authenticating on 401.
        # Returns the final requests.Response.
        tries = 3
        headers = dict(extra_headers)
        headers['X-Auth-Token'] = self.storage_token
        method = method.upper()
        path = unquote(path)
        extra_args = {}
        url = u'{}/{}/{}'.format(self.storage_url, self.default_container, path)
        if 'MKDIR' == method:
            headers['Content-Type'] = 'application/directory'
            method = 'PUT'
            pass
        elif 'MKLINK' == method:
            # NOTE(review): unlike MKDIR, method is not remapped to PUT here,
            # so 'MKLINK' is sent verbatim to the server -- confirm intent.
            headers['Content-Type'] = 'application/link'
            pass
        elif 'PUT' == method:
            # NOTE(review): streamsize is unbound for payload types other
            # than basestring/FileIO/file (Python 2 builtins) -- confirm.
            if isinstance(payload, basestring):
                streamsize = len(payload)
            elif isinstance(payload, io.FileIO) or isinstance(payload, file):
                streamsize = os.path.getsize(payload.name)
            if streamsize > self.MAX_FILE_CHUNK_SIZE:
                # send to upload queue and return
                print("Big file, queueing")
                self._uploadqueue.add((path, payload, streamsize))
                return
            else:
                # direct upload
                extra_args = dict(data=payload)
        elif 'GET' == method:
            pass
        elif 'DELETE' == method:
            pass
        while tries > 0:
            response = requests.request(method, url=url,
                                        headers=headers, params=params, **extra_args)
            if 401 == response.status_code:
                # Token expired: re-authenticate (subclass hook) and retry.
                self.connect()
            elif (response.status_code >= 200 and response.status_code <= 400 or
                  (response.status_code == 409 and method == 'DELETE')):
                self._header_dispatch(response.headers)
                return response
            else:
                print("Request error!")
                try:
                    print('headers', response.headers)
                    print('status', response.status_code)
                    print('body', response.content)
                except:
                    print("Bad response", response)
            tries -= 1
        return response

    def create_symlink(self, src, dst):
        """create a symlink"""
        # Not implemented yet.
        pass

    def create_directory(self, label):
        """create a directory"""
        r = self._send_request('MKDIR', label)
        if r.status_code < 200 or r.status_code >= 400:
            raise Exception("Cannot create directory")

    def _cache_directory(self, refresh = False):
        # Build (or reuse) a nested dict tree of the whole container listing:
        # Directory nodes hold children; leaves are File records.
        if refresh or self._dircache is None:
            resp = self._send_request('GET',
                                      '', params={'format':'json'}
                                      )
            data = resp.json()
            datatree = {}
            dirs = list()
            print("Items", len(data))
            for f in data:
                # Directory placeholder objects carry no payload; skip them --
                # intermediate Directory nodes are synthesized from the paths.
                if f['content_type'] == 'application/directory': continue
                pathsplit = f['name'].split('/')
                newpath = {}
                n = newpath
                for elm in pathsplit[0:-1]:
                    if elm not in n:
                        n[elm] = Directory(dirname=elm)
                    n = n[elm]
                n[pathsplit[-1]] = File(fname=pathsplit[-1], **f)
                dmerge(datatree, newpath)
            self._dircache = datatree
        return self._dircache

    def list_directory(self, dirpath, cached = True):
        # Return (files, dirs) for *dirpath*; raises ValueError when a path
        # component does not exist in the cached tree.
        dircache = self._cache_directory(not cached)
        spl = dirpath.split('/')
        n = dircache
        for e in spl:
            n = n.get(e, ValueError("Item does not exist"))
            if isinstance(n, ValueError):
                raise n
        files = [a for a in n.itervalues() if isinstance(a, File)]
        dirs = {k: a for k, a in n.iteritems() if isinstance(a, Directory)}
        return files, dirs

    def get_file(self, path, packetsize = 512*1024, offset = 0):
        # NOTE(review): packetsize and offset are currently ignored.
        return CloudFileReader(url = path, cfsobj = self)

    def delete_object(self, objpath):
        # Not implemented yet.
        pass

    def write_stream(self, stream, path):
        " writes a stream to a path in an existing container. "
        return self._send_request('PUT', path, payload = stream)

    def copy_object(self, src, dst):
        # Not implemented yet.
        pass

    def truncate_object(self, objpath, size):
        # Not implemented yet.
        pass

    def set_credentials(self, client_id, client_secret, refresh_token):
        # Store OAuth credentials for connect() (see Hubic subclass).
        self.client_id = client_id
        self.client_secret = client_secret
        self.refresh_token = refresh_token

    def upload_queue(self):
        # Drain self._uploadqueue: each queued big file is split into
        # MAX_FILE_CHUNK_SIZE parts uploaded by worker threads, then tied
        # together with an X-Object-Manifest object.
        def _get_parts(stream, pfx, sz, totalsize):
            # Generator of (prefix, zero-padded index, loader, skipper)
            # tuples.  Loader/skipper are deferred so a part already present
            # remotely can be skipped without reading it.
            # NOTE(review): relies on the enclosing scope's keep_going flag,
            # which the consumer flips to False on EOF.
            parts = ceil(totalsize / sz)
            partstrlen = len(str(parts)) + 1
            spl = pfx.split('/')
            chunkfolder = '/'.join(spl[0:-1])
            #a = stream.read(sz)
            i = 0
            #while len(a) > 0:
            while keep_going:
                d = lambda: stream.read(sz)
                a = lambda: stream.seek(sz, 1)
                #if len(d) == 0: break
                yield (pfx, str(i).zfill(partstrlen), d, a)
                i += 1
            print("Done getting parts")
        def _file_uploader(url, headers, data):
            # Thread worker: PUT one part, retrying up to 3 times.
            tries = 3
            while tries > 0:
                print("Sending %s" % (url,))
                r = requests.put(url, headers = headers, data = data)
                if r.status_code < 200 or r.status_code >= 400:
                    tries -= 1
                    continue
                else:
                    print("Done putting", url)
                    return
            raise Exception("Could not upload part " + url)
        queues = {}
        headers = {}
        threads = []
        for path, stream, streamsize in self._uploadqueue:
            self.create_directory(self.CHUNKS_FOLDER + '/' + path)
            keep_going = True
            for pfx, chunk, data_loader, data_skipper in _get_parts(stream,
                    self.CHUNKS_FOLDER + path, self.MAX_FILE_CHUNK_SIZE, streamsize):
                headers['X-Auth-Token'] = self.storage_token
                pathbuild = u'{}/{}/{}'.format(self.CHUNKS_FOLDER, path, chunk).replace('//', '/')
                url = u'{}/{}/{}'.format(self.storage_url, self.default_container, pathbuild).encode('utf-8')
                # Resumable uploads: skip parts that already exist remotely.
                existobj = self._send_request('HEAD', pathbuild)
                if existobj.status_code < 400:
                    print("Chunk exists!", path, chunk)
                    data_skipper()
                    continue
                data_bytes = data_loader()
                if len(data_bytes) == 0:
                    # EOF: stop the _get_parts generator on its next loop.
                    print("EOF for ", path)
                    keep_going = False
                    continue
                else:
                    print("Send chunk of", len(data_bytes), path, hashlib.sha1(data_bytes).hexdigest())
                    t = Thread(target=_file_uploader,
                               args=(url, headers, data_bytes))
                    threads.append(t)
                    t.start()
                    # Throttle: never more than MAX_UPLOAD_THREADS in flight.
                    # (isAlive is the Python 2 Thread API.)
                    while len([_ for _ in threads if _.isAlive()]) > self.MAX_UPLOAD_THREADS:
                        sleep(0.5)
            # Create the zero-byte manifest object that stitches the parts
            # together under the target path.
            pathbuild = u"{}/{}".format(self.default_container, path).replace("//", "/")
            url = u'{}/{}'.format(self.storage_url, pathbuild).encode('utf-8')
            headers['X-Object-Manifest'] = u'{}/{}/{}/'.format(self._default_container, self.CHUNKS_FOLDER, path).encode('utf-8')
            headers['Content-Type'] = 'application/octet-stream'
            requests.put(url, headers = headers, data = '')
            print("Created item")
        print("Joining laties")
        for t in threads: t.join()
        print("Done joining laties")
        self._uploadqueue.clear()

    def __init__(self, parameters = {}):
        # initialize structures
        # parameters: optional dict with 'uid', 'mode' overrides for stat().
        # NOTE(review): _gid is read from the 'uid' key -- likely a typo for
        # 'gid'; confirm before relying on group ownership.
        self.statcache = dict()
        self.storage_token = None
        self.storage_url = None
        self.block_quota = None
        self.free_blocks = None
        self.file_quota = None
        self.files_free = None
        self._dircache = None
        self._default_container = None
        self._uid = parameters.get('uid', 0)
        self._gid = parameters.get('uid', 0)
        self._mode = parameters.get('mode', 0750)
        self._uploadqueue = set()
        self._uploadpartsqueue = set()
        self.stopped = False
class Hubic(CloudFS):
    # CloudFS backed by the Hubic OAuth endpoints: exchanges the stored
    # refresh token for an access token, then fetches OpenStack credentials.

    def connect(self):
        """ this performs the Hubic authentication """
        token_url = "https://api.hubic.com/oauth/token"
        creds_url = "https://api.hubic.com/1.0/account/credentials"
        req = {"refresh_token": self.refresh_token, "grant_type": "refresh_token" }
        try:
            response = requests.post(token_url, auth=(
                self.client_id,
                self.client_secret
            ),
                data=req)
            r = response.json()
        except:
            # NOTE(review): bare except + unbounded recursion -- any failure
            # (including KeyboardInterrupt) sleeps 10s and retries forever.
            print("Starting over")
            try:
                print("status", response.status_code)
                print("headers", response.headers)
                print("body", response.content)
            except:
                print("bad response", response)
            sleep(10)
            return self.connect()
        access_token = r['access_token']
        token_type = r['token_type']
        expires_in = r['expires_in']
        # Trade the OAuth access token for Swift storage credentials.
        resp2 = requests.get(creds_url,
                             headers={"Authorization": "Bearer {}".format(access_token)})
        r = resp2.json()
        self.storage_url = r['endpoint']
        self.storage_token = r['token']
        print("Done")

    def __init__(self, client_id, client_secret, refresh_token, *args, **kwargs):
        # OAuth application credentials plus the long-lived refresh token.
        self.client_id = client_id
        self.client_secret = client_secret
        self.refresh_token = refresh_token
        CloudFS.__init__(self, *args, **kwargs)
def upload_file(h, verb, local, directory, remote):
    """Upload *local* to *directory*/*remote* through the CloudFS object *h*.

    With verb 'create' the upload is skipped when the remote file already
    exists; 'replace' always uploads.  Queued big-file parts are flushed
    via h.upload_queue() afterwards.
    """
    if verb == 'create':
        try:
            entries, _subdirs = h.list_directory(directory)
        except ValueError:
            # Missing remote directory is fine: the upload will create it.
            print("Dir does not exist", directory)
        else:
            for entry in entries:
                if os.path.basename(entry['name']) == remote:
                    print("File exists", remote)
                    return
    print(u"UPLOAD FILE Sending ", remote)
    h.write_stream(io.FileIO(local, "rb"), u"{}/{}".format(directory, remote))
    h.upload_queue()
    print(u"Uploaded {} to {}".format(local, directory))
if __name__ == '__main__':
    from os import environ
    from sys import argv
    # First CLI argument selects the mode: 'create' skips files that already
    # exist remotely, 'replace' always uploads.
    verb = argv[1]
    if verb not in 'create replace'.split():
        print("Usage: %s <create|replace> <(local_fn remote_folder [remote_fn])|('pipe' root_folder)>" % (argv[0],) )
        sys.exit(0)
    # Hubic OAuth credentials come from the environment.
    client_id = environ['HUBIC_CLIENT_ID']
    client_secret = environ['HUBIC_CLIENT_SECRET']
    ref_token = environ['HUBIC_REFRESH_TOKEN']
    h = Hubic(client_id, client_secret, ref_token)
    h.connect()
    if argv[2] == 'pipe':
        # Pipe mode: read one local path per stdin line and mirror each file
        # under the given remote root folder.  (Python 2: argv/stdin are byte
        # strings and are decoded by hand here.)
        tgtdir = argv[3].decode('utf-8')
        sys.stdin = codecs.getreader("utf-8")(sys.stdin)
        for line in sys.stdin:
            line = line.replace('\n', '')
            d = tgtdir + os.path.dirname(line)
            tgt = os.path.basename(line)
            upload_file(h, 'create', line, d, tgt)
    else:
        # Single-file mode: local path, remote folder, optional remote name.
        filepath, targetfolder = argv[2:4]
        targetfile = os.path.basename(filepath) if len(argv) == 4 else argv[4]
        upload_file(h, verb, filepath, targetfolder, targetfile)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| |
################################################################################
### @brief creates examples from documentation files
###
### @file
###
### DISCLAIMER
###
### Copyright by triAGENS GmbH - All rights reserved.
###
### The Programs (which include both the software and documentation)
### contain proprietary information of triAGENS GmbH; they are
### provided under a license agreement containing restrictions on use and
### disclosure and are also protected by copyright, patent and other
### intellectual and industrial property laws. Reverse engineering,
### disassembly or decompilation of the Programs, except to the extent
### required to obtain interoperability with other independently created
### software or as specified by law, is prohibited.
###
### The Programs are not intended for use in any nuclear, aviation, mass
### transit, medical, or other inherently dangerous applications. It shall
### be the licensee's responsibility to take all appropriate fail-safe,
### backup, redundancy, and other measures to ensure the safe use of such
### applications if the Programs are used for such purposes, and triAGENS
### GmbH disclaims liability for any damages caused by such use of
### the Programs.
###
### This software is the confidential and proprietary information of
### triAGENS GmbH. You shall not disclose such confidential and
### proprietary information and shall use it only in accordance with the
### terms of the license agreement you entered into with triAGENS GmbH.
###
### Copyright holder is triAGENS GmbH, Cologne, Germany
###
### @author Dr. Frank Celler
### @author Copyright 2011-2014, triagens GmbH, Cologne, Germany
################################################################################
import re, sys, string, os
# Drop the interpreter/script name; argv now holds only options and filenames.
argv = sys.argv
argv.pop(0)
################################################################################
### @brief enable debug output
################################################################################
DEBUG = False
################################################################################
### @brief enable debug output in JavaScript
################################################################################
JS_DEBUG = False
################################################################################
### @brief output directory
################################################################################
OutputDir = "/tmp/"
################################################################################
### @brief arangosh output
###
### A list of commands that are executed in order to produce the output. The
### commands and their output is logged.
################################################################################
ArangoshOutput = {}
################################################################################
### @brief arangosh run
###
### A list of commands that are executed in order to produce the output. This
### is mostly used for HTTP request examples.
################################################################################
ArangoshRun = {}
################################################################################
### @brief arangosh output files
###
### Used to detect duplicate example names across input files.
################################################################################
ArangoshFiles = {}
################################################################################
### @brief arangosh examples, in some deterministic order
################################################################################
ArangoshCases = [ ]
################################################################################
### @brief global setup for arangosh
################################################################################
ArangoshSetup = ""
################################################################################
### @brief states
###
### Parser states: outside an example, inside an OUTPUT example, or inside
### a RUN example.
################################################################################
STATE_BEGIN = 0
STATE_ARANGOSH_OUTPUT = 1
STATE_ARANGOSH_RUN = 2
state = STATE_BEGIN
################################################################################
### @brief option states
###
### Command-line scanner states: next argument is a plain filename, a setup
### file, or the output directory.
################################################################################
OPTION_NORMAL = 0
OPTION_ARANGOSH_SETUP = 1
OPTION_OUTPUT_DIR = 2
fstate = OPTION_NORMAL
################################################################################
### @brief append input
################################################################################
def appendInput(partialCmd, cmd, addNL):
    """Render *cmd* as an arangosh prompt line appended to *partialCmd*.

    The first line of a command gets the "arangosh> " prompt, continuation
    lines the "........> " prompt.  When addNL is truthy a literal backslash-n
    escape sequence is appended (the output is embedded in JavaScript).
    """
    suffix = "\\n" if addNL else ""
    prompt = "arangosh> " if partialCmd == "" else partialCmd + "........> "
    return prompt + cmd + suffix
################################################################################
### @brief get file names
################################################################################
# Scan the command line: --arangosh-setup and --output-dir flag the NEXT
# argument; everything else is a file or a directory to walk for sources.
filenames = []
for filename in argv:
    if filename == "--arangosh-setup":
        fstate = OPTION_ARANGOSH_SETUP
        continue
    if filename == "--output-dir":
        fstate = OPTION_OUTPUT_DIR
        continue
    if fstate == OPTION_NORMAL:
        if os.path.isdir(filename):
            # Recursively collect documentation/source files, excluding the
            # generated ahuacatl.js.
            for root, dirs, files in os.walk(filename):
                for file in files:
                    if (file.endswith(".mdpp") or file.endswith(".js") or file.endswith(".cpp")) and not file.endswith("ahuacatl.js"):
                        filenames.append(os.path.join(root, file))
        else:
            filenames.append(filename)
    elif fstate == OPTION_ARANGOSH_SETUP:
        # Argument after --arangosh-setup: read the whole file into the
        # global setup snippet.
        fstate = OPTION_NORMAL
        f = open(filename, "r")
        for line in f:
            line = line.rstrip('\n')
            ArangoshSetup += line + "\n"
        f.close()
    elif fstate == OPTION_OUTPUT_DIR:
        # Argument after --output-dir.
        fstate = OPTION_NORMAL
        OutputDir = filename
################################################################################
### @brief loop over input files
################################################################################
# Markers for the example parser: start of an OUTPUT example, start of a RUN
# example, end of any example, and leading whitespace.
r1 = re.compile(r'^(/// )?@EXAMPLE_ARANGOSH_OUTPUT{([^}]*)}')
r2 = re.compile(r'^(/// )?@EXAMPLE_ARANGOSH_RUN{([^}]*)}')
r3 = re.compile(r'^@END_EXAMPLE_')
r4 = re.compile(r'^ +')
strip = None        # prefix ("/// " or "") to remove from example body lines
name = ""           # name of the example currently being collected
partialCmd = ""     # prompt-rendered continuation of a multi-line command
partialLine = ""    # raw continuation of a multi-line command
for filename in filenames:
    f = open(filename, "r")
    state = STATE_BEGIN
    for line in f:
        if strip is None:
            strip = ""
        line = line.rstrip('\n')
        # read the start line and remember the prefix which must be skipped
        if state == STATE_BEGIN:
            m = r1.match(line)
            if m:
                strip = m.group(1)
                name = m.group(2)
                if name in ArangoshFiles:
                    # Duplicate names would overwrite each other's output.
                    print >> sys.stderr, "%s\nduplicate file name '%s'\n%s\n" % ('#' * 80, name, '#' * 80)
                ArangoshFiles[name] = True
                ArangoshOutput[name] = []
                state = STATE_ARANGOSH_OUTPUT
                continue
            m = r2.match(line)
            if m:
                strip = m.group(1)
                name = m.group(2)
                if name in ArangoshFiles:
                    print >> sys.stderr, "%s\nduplicate file name '%s'\n%s\n" % ('#' * 80, name, '#' * 80)
                ArangoshCases.append(name)
                ArangoshFiles[name] = True
                ArangoshRun[name] = ""
                state = STATE_ARANGOSH_RUN
                continue
            continue
        # we are within a example
        line = line[len(strip):]
        showCmd = True
        # end-example test
        m = r3.match(line)
        if m:
            # Reset all per-example state and look for the next start marker.
            name = ""
            partialLine = ""
            partialCmd = ""
            state = STATE_BEGIN
            continue
        # fix special characters
        cmd = line.replace("\\", "\\\\").replace("'", "\\'")
        # handle any continued line magic
        if line != "":
            if line[0] == "|":
                # '|' marks a line that is continued on the next line.
                if line.startswith("| "):
                    line = line[2:]
                    cmd = cmd[2:]
                else:
                    line = line[1:]
                    cmd = cmd[1:]
                partialLine = partialLine + line + "\n"
                partialCmd = appendInput(partialCmd, cmd, True)
                continue
            if line[0] == "~":
                # '~' marks a hidden line: executed but not echoed.
                if line.startswith("~ "):
                    line = line[2:]
                else:
                    line = line[1:]
                showCmd = False
            elif line.startswith("  "):
                line = line[2:]
                cmd = cmd[2:]
        # Flush any accumulated continuation lines together with this one.
        line = partialLine + line
        partialLine = ""
        if showCmd:
            cmd = appendInput(partialCmd, cmd, False)
            partialCmd = ""
        else:
            cmd = None
        if state == STATE_ARANGOSH_OUTPUT:
            ArangoshOutput[name].append([line, cmd])
        elif state == STATE_ARANGOSH_RUN:
            ArangoshRun[name] += line + "\n"
    f.close()
################################################################################
### @brief generate arangosh example
################################################################################
# Statements (as opposed to expressions) must not be wrapped in "XXX = ..."
# below; this pattern recognizes them by their leading keyword.
gr1 = re.compile(r'^[ \n]*(while|if|var|throw|for) ')
def generateArangoshOutput():
    # Emit a JavaScript program (to stdout, Python 2 print statements) that
    # replays every OUTPUT example in capture mode and writes each example's
    # captured text to <OutputDir>/<name>.generated.
    print "var internal = require('internal');"
    print "var fs = require('fs');"
    print "var ArangoshOutput = {};"
    print "internal.startPrettyPrint(true);"
    print "internal.stopColorPrint(true);"
    if JS_DEBUG:
        print "internal.output('%s\\n');" % ('=' * 80)
        print "internal.output('ARANGOSH EXAMPLE\\n');"
        print "internal.output('%s\\n');" % ('=' * 80)
        print
    # Run the global setup snippet once, in its own scope.
    print "(function () {\n%s}());" % ArangoshSetup
    print
    for key in ArangoshOutput:
        value = ArangoshOutput[key]
        print "(function() {"
        print "internal.startCaptureMode();";
        for l in value:
            # l is a [raw_line, prompt_rendered_cmd_or_None] pair.
            print "try {"
            print "  var XXX;"
            m = gr1.match(l[0])
            if l[1]:
                # Echo the prompt-rendered command before executing it.
                print "print('%s');" % l[1]
            if m:
                # Statement: execute as-is.
                print "%s" % l[0]
            else:
                # Expression: capture its value so it can be printed.
                print "XXX = %s" % l[0]
            if l[1]:
                print "if (XXX !== undefined) {print(XXX);}"
            print "} catch (err) { print(err); }"
        print "var output = internal.stopCaptureMode();"
        print "ArangoshOutput['%s'] = output;" % key
        if JS_DEBUG:
            print "internal.output('%s', ':\\n', output, '\\n%s\\n');" % (key, '-' * 80)
        print "}());"
    for key in ArangoshOutput:
        print "fs.write('%s/%s.generated', ArangoshOutput['%s']);" % (OutputDir, key, key)
################################################################################
### @brief generate arangosh run
################################################################################
def generateArangoshRun():
    # Emit a JavaScript program (to stdout, Python 2 print statements) that
    # executes every RUN example verbatim with curl/response logging helpers
    # in scope, writing each example's output to <OutputDir>/<name>.generated.
    print "var internal = require('internal');"
    print "var fs = require('fs');"
    print "var ArangoshRun = {};"
    print "internal.startPrettyPrint(true);"
    print "internal.stopColorPrint(true);"
    if JS_DEBUG:
        print "internal.output('%s\\n');" % ('=' * 80)
        print "internal.output('ARANGOSH RUN\\n');"
        print "internal.output('%s\\n');" % ('=' * 80)
        print
    # Run the global setup snippet once, in its own scope.
    print "(function () {\n%s}());" % ArangoshSetup
    print
    # ArangoshCases preserves insertion order (Python 2 dicts do not).
    for key in ArangoshCases:
        value = ArangoshRun[key]
        print "(function() {"
        print "internal.output('RUN STARTING: %s\\n');" % key
        print "var output = '';"
        print "var appender = function(text) { output += text; };"
        print "var log = function (a) { internal.startCaptureMode(); print(a); appender(internal.stopCaptureMode()); };"
        print "var logCurlRequestRaw = internal.appendCurlRequest(appender);"
        print "var logCurlRequest = function () { var r = logCurlRequestRaw.apply(logCurlRequestRaw, arguments); db._collections(); return r; };"
        print "var curlRequestRaw = internal.appendCurlRequest(function (text) {});"
        print "var curlRequest = function () { return curlRequestRaw.apply(curlRequestRaw, arguments); };"
        print "var logJsonResponse = internal.appendJsonResponse(appender);"
        print "var logRawResponse = internal.appendRawResponse(appender);"
        print "var assert = function(a) { if (! a) { internal.output('%s\\nASSERTION FAILED: %s\\n%s\\n'); throw new Error('assertion failed'); } };" % ('#' * 80, key, '#' * 80)
        print "try { %s internal.output('RUN SUCCEEDED: %s\\n'); } catch (err) { print('%s\\nRUN FAILED: %s, ', err, '\\n%s\\n'); }" % (value, key, '#' * 80, key, '#' * 80)
        print "ArangoshRun['%s'] = output;" % key
        if JS_DEBUG:
            print "internal.output('%s', ':\\n', output, '\\n%s\\n');" % (key, '-' * 80)
        print "fs.write('%s/%s.generated', ArangoshRun['%s']);" % (OutputDir, key, key)
        print "}());"
################################################################################
### @brief main
################################################################################
# Emit the generated JavaScript: captured-output examples first, then the
# HTTP "run" examples.
generateArangoshOutput()
generateArangoshRun()
| |
# Copyright 2012-2013 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import textwrap
import traceback
from twisted.internet import defer
from buildbot.process.buildstep import SUCCESS, LoggingBuildStep, ShellMixin
from buildbot.steps import shell
from ..travisyml import TRAVIS_HOOKS
from .base import ConfigurableStep
class SetupVirtualEnv(ShellMixin, LoggingBuildStep):
    # Build step that (re)creates a Python virtualenv sandbox on the worker
    # and prepends its bin/ directory to the PATH build property.
    name = "setup virtualenv"
    sandboxname = "sandbox"

    def __init__(self, python):
        # python: version suffix used to pick the interpreter, e.g. "2.7"
        # produces 'python2.7' in the shell script below.
        self.python = python
        super(SetupVirtualEnv, self).__init__(haltOnFailure=True)

    @defer.inlineCallbacks
    def run(self):
        # Run the generated shell script remotely, then export the sandbox's
        # bin directory on PATH for all subsequent steps.
        command = self.buildCommand()
        cmd = yield self.makeRemoteShellCommand(
            command=["bash", "-c", command])
        yield self.runCommand(cmd)
        self.setProperty("PATH", os.path.join(
            self.getProperty("builddir"), self.workdir, "sandbox/bin") + ":" +
            self.worker.worker_environ['PATH'])
        defer.returnValue(cmd.results())

    def buildCommand(self):
        # set up self.command as a very long sh -c invocation
        # (the {{ }} braces below are literal shell braces, escaped for
        # str.format)
        command = textwrap.dedent("""\
            PYTHON='python{virtualenv_python}'
            VE='{sandboxname}'
            VEPYTHON='{sandboxname}/bin/python'
            # first, set up the virtualenv if it hasn't already been done, or if it's
            # broken (as sometimes happens when a slave's Python is updated)
            if ! test -f "$VE/bin/pip" || ! test -d "$VE/lib/$PYTHON" || ! "$VE/bin/python" -c 'import math'; then
                echo "Setting up virtualenv $VE";
                rm -rf "$VE";
                test -d "$VE" && {{ echo "$VE couldn't be removed"; exit 1; }};
                virtualenv -p $PYTHON "$VE" || exit 1;
            else
                echo "Virtualenv already exists"
            fi
            echo "Upgrading pip";
            $VE/bin/pip install -U pip
            """).format(
            virtualenv_python=self.python, sandboxname=self.sandboxname)
        return command
class ShellCommand(shell.ShellCommand):
    # Shell step that exports build properties as environment variables and
    # scrapes test-result totals out of the command's stdout.
    flunkOnFailure = True
    haltOnFailure = True
    warnOnWarnings = True

    def setupEnvironment(self, cmd):
        """ Turn all build properties into environment variables """
        shell.ShellCommand.setupEnvironment(self, cmd)
        env = {}
        for k, v in self.build.getProperties().properties.items():
            # Properties are stored as (value, source) pairs; only the value
            # is exported, stringified.
            env[str(k)] = str(v[0])
        if cmd.args['env'] is None:
            cmd.args['env'] = {}
        cmd.args['env'].update(env)

    def createSummary(self, stdio):
        self.updateStats(stdio)

    def setStatistics(self, key, value):
        # NOTE(review): no-op stub, and updateStats/describe call the
        # singular setStatistic/getStatistic (presumably inherited from the
        # buildbot base class) -- confirm these stubs are intentional.
        pass

    def getStatistics(self, key, default):
        # NOTE(review): no-op stub, see setStatistics.
        pass

    def updateStats(self, log):
        """
        Parse test results out of common test harnesses.

        Currently supported are:
         * Plone
         * Nose
         * Trial
         * Something mitchell wrote in Java
        """
        stdio = log.getText()
        total = passed = skipped = fails = warnings = errors = 0
        hastests = False
        # Plone? That has lines starting "Ran" and "Total". Total is missing if there is only a single layer.
        # For this reason, we total ourselves which lets us work even if someone runes 2 batches of plone tests
        # from a single target
        # Example::
        #     Ran 24 tests with 0 failures and 0 errors in 0.009 seconds
        if not hastests:
            outputs = re.findall(
                "Ran (?P<count>[\d]+) tests with (?P<fail>[\d]+) failures and (?P<error>[\d]+) errors",
                stdio)
            for output in outputs:
                total += int(output[0])
                fails += int(output[1])
                errors += int(output[2])
                hastests = True
        # Twisted
        # Example::
        #     FAILED (errors=5, successes=11)
        #     PASSED (successes=16)
        if not hastests:
            for line in stdio.split("\n"):
                if line.startswith("FAILED (") or line.startswith("PASSED ("):
                    hastests = True
                    # Strip "FAILED (" / "PASSED (" and the trailing ")".
                    line = line[8:][:-1]
                    stats = line.split(", ")
                    data = {}
                    for stat in stats:
                        k, v = stat.split("=")
                        data[k] = int(v)
                    if "successes" not in data:
                        # Older trial output omits successes; reconstruct it
                        # from the "Ran N tests" totals.
                        total = 0
                        for number in re.findall(
                                "Ran (?P<count>[\d]+) tests in ", stdio):
                            total += int(number)
                        data["successes"] = total - sum(data.values())
        # This matches Nose and Django output
        # Example::
        #     Ran 424 tests in 152.927s
        #     FAILED (failures=1)
        #     FAILED (errors=3)
        if not hastests:
            fails += len(re.findall('FAIL:', stdio))
            errors += len(
                re.findall(
                    '======================================================================\nERROR:',
                    stdio))
            for number in re.findall("Ran (?P<count>[\d]+)", stdio):
                total += int(number)
                hastests = True
        # We work out passed at the end because most test runners dont tell us
        # and we can't distinguish between different test systems easily so we
        # might double count.
        passed = total - (skipped + fails + errors + warnings)
        # Update the step statistics with out shiny new totals
        if hastests:
            self.setStatistic('total', total)
            self.setStatistic('fails', fails)
            self.setStatistic('errors', errors)
            self.setStatistic('warnings', warnings)
            self.setStatistic('skipped', skipped)
            self.setStatistic('passed', passed)

    def describe(self, done=False):
        # Append the scraped counters to the base description once finished.
        description = shell.ShellCommand.describe(self, done)
        if done and self.hasStatistic('total'):
            def append(stat, fmtstring):
                val = self.getStatistic(stat, 0)
                if val:
                    description.append(fmtstring % val)
            append("total", "%d tests")
            append("fails", "%d fails")
            append("errors", "%d errors")
            append("warnings", "%d warnings")
            append("skipped", "%d skipped")
            append("passed", "%d passed")
        return description
class TravisSetupSteps(ConfigurableStep):
    # Reads the .travis.yml-style config and appends one ShellCommand step
    # per configured hook command (plus a virtualenv step for Python builds).
    name = "setup-steps"
    haltOnFailure = True
    flunkOnFailure = True
    MAX_NAME_LENGTH = 50

    def addSetupVirtualEnv(self, python):
        step = SetupVirtualEnv(python)
        self.build.addStepsAfterLastStep([step])

    def addShellCommand(self, command):
        # *command* is either a plain command string or a dict with optional
        # 'title', 'condition' and the actual 'cmd'.
        name = None
        condition = None
        if isinstance(command, dict):
            name = command.get("title")
            condition = command.get("condition")
            command = command.get("cmd")
        if name is None:
            name = self.truncateName(command)
        if condition is not None:
            try:
                if not self.testCondition(condition):
                    return
            except Exception:
                self.descriptionDone = u"Problem parsing condition"
                self.addCompleteLog("condition error", traceback.format_exc())
                return
        step = ShellCommand(
            name=name, description=command, command=['bash', '-c', command])
        self.build.addStepsAfterLastStep([step])

    def testCondition(self, condition):
        # Evaluate the condition with build properties as local variables.
        # NOTE(review): eval() on config-supplied text executes arbitrary
        # Python -- acceptable only if the build config is fully trusted.
        l = dict(
            (k, v)
            for k, (v, s) in self.build.getProperties().properties.items())
        return eval(condition, l)

    def truncateName(self, name):
        # Derive a short step name from the command: strip leading '#'/space,
        # keep only the first line, and ellipsize to MAX_NAME_LENGTH.
        name = name.lstrip("#")
        name = name.lstrip(" ")
        name = name.split("\n")[0]
        if len(name) > self.MAX_NAME_LENGTH:
            name = name[:self.MAX_NAME_LENGTH - 3] + "..."
        return name

    @defer.inlineCallbacks
    def run(self):
        # Schedule the configured steps; always reports SUCCESS itself.
        config = yield self.getStepConfig()
        if 'python' in config.language:
            self.addSetupVirtualEnv(self.getProperty("python"))
        for k in TRAVIS_HOOKS:
            for command in getattr(config, k):
                self.addShellCommand(command=command, )
        defer.returnValue(SUCCESS)
| |
from sympy import (
Abs, Dummy, Eq, Gt, Function,
LambertW, Piecewise, Poly, Rational, S, Symbol, Matrix,
asin, acos, acsc, asec, atan, atanh, cos, csc, erf, erfinv, erfc, erfcinv,
exp, log, pi, sin, sinh, sec, sqrt, symbols,
tan, tanh, atan2, arg,
Lambda, imageset, cot, acot, I, EmptySet, Union, E, Interval, Intersection,
oo)
from sympy.core.function import nfloat
from sympy.core.relational import Unequality as Ne
from sympy.functions.elementary.complexes import im, re
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys.rootoftools import CRootOf
from sympy.sets import (FiniteSet, ConditionSet, Complement, ImageSet)
from sympy.utilities.pytest import XFAIL, raises, skip, slow
from sympy.utilities.randtest import verify_numerically as tn
from sympy.physics.units import cm
from sympy.solvers.solveset import (
solveset_real, domain_check, solveset_complex, linear_eq_to_matrix,
linsolve, _is_function_class_equation, invert_real, invert_complex,
solveset)
# Real-valued symbols shared by the tests below.
a, b, c, x, y, z, q, m, n = symbols('a b c x y z q m n', real=True)
def test_invert_real():
    """invert_real on linear, exp/log, Abs, power and trigonometric inputs."""
    x = Symbol('x', real=True)
    y = Symbol('y')
    n = Symbol('n')

    def ireal(x, s=S.Reals):
        # Intersect the candidate solution set with the given domain.
        return Intersection(s, x)

    minus_n = Intersection(Interval(-oo, 0), FiniteSet(-n))
    plus_n = Intersection(Interval(0, oo), FiniteSet(n))
    assert solveset(abs(x) - n, x, S.Reals) == Union(minus_n, plus_n)
    assert invert_real(exp(x), y, x) == (x, ireal(FiniteSet(log(y))))
    y = Symbol('y', positive=True)
    n = Symbol('n', real=True)
    assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3))
    assert invert_real(x*3, y, x) == (x, FiniteSet(y / 3))
    assert invert_real(exp(x), y, x) == (x, FiniteSet(log(y)))
    assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3))
    assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3))
    assert invert_real(exp(x) + 3, y, x) == (x, ireal(FiniteSet(log(y - 3))))
    assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3)))
    assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y)))
    assert invert_real(log(3*x), y, x) == (x, FiniteSet(exp(y) / 3))
    assert invert_real(log(x + 3), y, x) == (x, FiniteSet(exp(y) - 3))
    minus_y = Intersection(Interval(-oo, 0), FiniteSet(-y))
    plus_y = Intersection(Interval(0, oo), FiniteSet(y))
    assert invert_real(Abs(x), y, x) == (x, Union(minus_y, plus_y))
    assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2)))
    assert invert_real(2**exp(x), y, x) == (x, ireal(FiniteSet(log(log(y)/log(2)))))
    assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y)))
    assert invert_real(x**Rational(1, 2), y, x) == (x, FiniteSet(y**2))
    raises(ValueError, lambda: invert_real(x, x, x))
    raises(ValueError, lambda: invert_real(x**pi, y, x))
    raises(ValueError, lambda: invert_real(S.One, y, x))
    assert invert_real(x**31 + x, y, x) == (x**31 + x, FiniteSet(y))
    y_1 = Intersection(Interval(-1, oo), FiniteSet(y - 1))
    y_2 = Intersection(Interval(-oo, -1), FiniteSet(-y - 1))
    assert invert_real(Abs(x**31 + x + 1), y, x) == (x**31 + x,
                                                     Union(y_1, y_2))
    assert invert_real(sin(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + (-1)**n*asin(y)), S.Integers))
    assert invert_real(sin(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log((-1)**n*asin(y) + n*pi)), S.Integers))
    assert invert_real(csc(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + (-1)**n*acsc(y)), S.Integers))
    assert invert_real(csc(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log((-1)**n*acsc(y) + n*pi)), S.Integers))
    assert invert_real(cos(x), y, x) == \
        (x, Union(imageset(Lambda(n, 2*n*pi + acos(y)), S.Integers), \
                  imageset(Lambda(n, 2*n*pi - acos(y)), S.Integers)))
    assert invert_real(cos(exp(x)), y, x) == \
        (x, Union(imageset(Lambda(n, log(2*n*pi + acos(y))), S.Integers), \
                  imageset(Lambda(n, log(2*n*pi - acos(y))), S.Integers)))
    assert invert_real(sec(x), y, x) == \
        (x, Union(imageset(Lambda(n, 2*n*pi + asec(y)), S.Integers), \
                  imageset(Lambda(n, 2*n*pi - asec(y)), S.Integers)))
    assert invert_real(sec(exp(x)), y, x) == \
        (x, Union(imageset(Lambda(n, log(2*n*pi + asec(y))), S.Integers), \
                  imageset(Lambda(n, log(2*n*pi - asec(y))), S.Integers)))
    assert invert_real(tan(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + atan(y)), S.Integers))
    assert invert_real(tan(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log(n*pi + atan(y))), S.Integers))
    assert invert_real(cot(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + acot(y)), S.Integers))
    assert invert_real(cot(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log(n*pi + acot(y))), S.Integers))
    assert invert_real(tan(tan(x)), y, x) == \
        (tan(x), imageset(Lambda(n, n*pi + atan(y)), S.Integers))

    x = Symbol('x', positive=True)
    assert invert_real(x**pi, y, x) == (x, FiniteSet(y**(1/pi)))

    # Test for ``set_h`` containing information about the domain
    n = Dummy('n')
    x = Symbol('x')
    h1 = Intersection(Interval(-3, oo), FiniteSet(a + b - 3),
                      imageset(Lambda(n, -n + a - 3), Interval(-oo, 0)))
    h2 = Intersection(Interval(-oo, -3), FiniteSet(-a + b - 3),
                      imageset(Lambda(n, n - a - 3), Interval(0, oo)))
    h3 = Intersection(Interval(-3, oo), FiniteSet(a - b - 3),
                      imageset(Lambda(n, -n + a - 3), Interval(0, oo)))
    h4 = Intersection(Interval(-oo, -3), FiniteSet(-a - b - 3),
                      imageset(Lambda(n, n - a - 3), Interval(-oo, 0)))
    assert invert_real(Abs(Abs(x + 3) - a) - b, 0, x) == (x, Union(h1, h2, h3, h4))
def test_invert_complex():
    """invert_complex on linear, exp and log inputs, plus invalid arguments."""
    assert invert_complex(x + 3, y, x) == (x, FiniteSet(y - 3))
    assert invert_complex(x*3, y, x) == (x, FiniteSet(y / 3))
    assert invert_complex(exp(x), y, x) == \
        (x, imageset(Lambda(n, I*(2*pi*n + arg(y)) + log(Abs(y))), S.Integers))
    assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y)))
    raises(ValueError, lambda: invert_real(1, y, x))
    raises(ValueError, lambda: invert_complex(x, x, x))
    raises(ValueError, lambda: invert_complex(x, x, 1))
def test_domain_check():
    """domain_check is False at poles / infinities and True elsewhere."""
    assert domain_check(1/(1 + (1/(x+1))**2), x, -1) is False
    assert domain_check(x**2, x, 0) is True
    assert domain_check(x, x, oo) is False
    assert domain_check(0, x, oo) is False
def test_is_function_class_equation():
    """_is_function_class_equation: trig/hyperbolic equations must have the
    symbol only inside class functions with linear arguments."""
    from sympy.abc import x, a
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + sin(x) - a, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x + a) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x*a) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       a*tan(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x)**2 + sin(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x**2), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x**2) + sin(x), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x)**sin(x), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(sin(x)) + sin(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + sinh(x) - a, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x + a) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x*a) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       a*tanh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x)**2 + sinh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x**2), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x**2) + sinh(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x)**sinh(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(sinh(x)) + sinh(x), x) is False
def test_garbage_input():
    """Invalid symbol arguments must raise ValueError."""
    raises(ValueError, lambda: solveset_real(x, 1))
    raises(ValueError, lambda: solveset_real([x], x))
    raises(ValueError, lambda: solveset_real(x, pi))
    raises(ValueError, lambda: solveset_real(x, x**2))
    raises(ValueError, lambda: solveset_complex([x], x))
    raises(ValueError, lambda: solveset_complex(x, pi))
def test_solve_mul():
    """Products of factors: the solution set is the union over the factors."""
    assert solveset_real((a*x + b)*(exp(x) - 3), x) == \
        FiniteSet(-b/a, log(3))
    assert solveset_real((2*x + 8)*(8 + exp(x)), x) == FiniteSet(S(-4))
    assert solveset_real(x/log(x), x) == EmptySet()
def test_solve_invert():
    """Equations solved by inverting exp/log/power expressions."""
    assert solveset_real(exp(x) - 3, x) == FiniteSet(log(3))
    assert solveset_real(log(x) - 3, x) == FiniteSet(exp(3))
    assert solveset_real(3**(x + 2), x) == FiniteSet()
    assert solveset_real(3**(2 - x), x) == FiniteSet()
    assert solveset_real(y - b*exp(a/x), x) == Intersection(S.Reals, FiniteSet(a/log(y/b)))
    # issue 4504
    assert solveset_real(2**x - 10, x) == FiniteSet(log(10)/log(2))
def test_errorinverses():
    """Error functions and their inverses (erf/erfinv, erfc/erfcinv)."""
    assert solveset_real(erf(x) - S.One/2, x) == \
        FiniteSet(erfinv(S.One/2))
    assert solveset_real(erfinv(x) - 2, x) == \
        FiniteSet(erf(2))
    assert solveset_real(erfc(x) - S.One, x) == \
        FiniteSet(erfcinv(S.One))
    assert solveset_real(erfcinv(x) - 2, x) == FiniteSet(erfc(2))
def test_solve_polynomial():
    """Polynomial and fractional-power polynomial equations over the reals."""
    assert solveset_real(3*x - 2, x) == FiniteSet(Rational(2, 3))
    assert solveset_real(x**2 - 1, x) == FiniteSet(-S(1), S(1))
    assert solveset_real(x - y**3, x) == FiniteSet(y ** 3)

    a11, a12, a21, a22, b1, b2 = symbols('a11, a12, a21, a22, b1, b2')

    assert solveset_real(x**3 - 15*x - 4, x) == FiniteSet(
        -2 + 3 ** Rational(1, 2),
        S(4),
        -2 - 3 ** Rational(1, 2))

    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert len(solveset_real(x**5 + x**3 + 1, x)) == 1
    assert len(solveset_real(-2*x**3 + 4*x**2 - 2*x + 6, x)) > 0
def test_return_root_of():
    """Irreducible quintics+ come back as CRootOf objects, and nfloat works."""
    f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf

    # if one uses solve to get the roots of a polynomial that has a CRootOf
    # solution, make sure that the use of nfloat during the solve process
    # doesn't fail. Note: if you want numerical solutions to a polynomial
    # it is *much* faster to use nroots to get them than to solve the
    # equation only to get CRootOf solutions which are then numerically
    # evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
    # than [i.n() for i in solve(eq)] to get the numerical roots of eq.
    assert nfloat(list(solveset_complex(x**5 + 3*x**3 + 7, x))[0],
                  exponent=False) == CRootOf(x**5 + 3*x**3 + 7, 0).n()

    sol = list(solveset_complex(x**6 - 2*x + 2, x))
    assert all(isinstance(i, CRootOf) for i in sol) and len(sol) == 6

    f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf

    s = x**5 + 4*x**3 + 3*x**2 + S(7)/4
    assert solveset_complex(s, x) == \
        FiniteSet(*Poly(s*4, domain='ZZ').all_roots())

    # XXX: this comparison should work without converting the FiniteSet to list
    # See #7876
    eq = x*(x - 1)**2*(x + 1)*(x**6 - x + 1)
    assert list(solveset_complex(eq, x)) == \
        list(FiniteSet(-1, 0, 1, CRootOf(x**6 - x + 1, 0),
                       CRootOf(x**6 - x + 1, 1),
                       CRootOf(x**6 - x + 1, 2),
                       CRootOf(x**6 - x + 1, 3),
                       CRootOf(x**6 - x + 1, 4),
                       CRootOf(x**6 - x + 1, 5)))
def test__has_rational_power():
    """_has_rational_power returns (flag, lcm of fractional-power denominators)."""
    from sympy.solvers.solveset import _has_rational_power
    assert _has_rational_power(sqrt(2), x)[0] is False
    assert _has_rational_power(x*sqrt(2), x)[0] is False
    assert _has_rational_power(x**2*sqrt(x), x) == (True, 2)
    assert _has_rational_power(sqrt(2)*x**(S(1)/3), x) == (True, 3)
    assert _has_rational_power(sqrt(x)*x**(S(1)/3), x) == (True, 6)
def test_solveset_sqrt_1():
    """Simple single-radical equations."""
    assert solveset_real(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S(1), S(2))
    assert solveset_real(sqrt(x - 1) - x + 7, x) == FiniteSet(10)
    assert solveset_real(sqrt(x - 2) - 5, x) == FiniteSet(27)
    assert solveset_real(sqrt(x) - 2 - 5, x) == FiniteSet(49)
    assert solveset_real(sqrt(x**3), x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 1), x) == FiniteSet(1)
def test_solveset_sqrt_2():
    """Radical equations with several square roots and mixed radicals."""
    # http://tutorial.math.lamar.edu/Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
    assert solveset_real(sqrt(2*x - 1) - sqrt(x - 4) - 2, x) == \
        FiniteSet(S(5), S(13))
    assert solveset_real(sqrt(x + 7) + 2 - sqrt(3 - x), x) == \
        FiniteSet(-6)

    # http://www.purplemath.com/modules/solverad.htm
    assert solveset_real(sqrt(17*x - sqrt(x**2 - 5)) - 7, x) == \
        FiniteSet(3)
    eq = x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4)
    assert solveset_real(eq, x) == FiniteSet(-S(1)/2, -S(1)/3)
    eq = sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)
    assert solveset_real(eq, x) == FiniteSet(0)
    eq = sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)
    assert solveset_real(eq, x) == FiniteSet(5)
    eq = sqrt(x)*sqrt(x - 7) - 12
    assert solveset_real(eq, x) == FiniteSet(16)
    eq = sqrt(x - 3) + sqrt(x) - 3
    assert solveset_real(eq, x) == FiniteSet(4)
    eq = sqrt(2*x**2 - 7) - (3 - x)
    assert solveset_real(eq, x) == FiniteSet(-S(8), S(2))

    # others
    eq = sqrt(9*x**2 + 4) - (3*x + 2)
    assert solveset_real(eq, x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 3) - sqrt(x) - 3, x) == FiniteSet()
    eq = (2*x - 5)**Rational(1, 3) - 3
    assert solveset_real(eq, x) == FiniteSet(16)
    assert solveset_real(sqrt(x) + sqrt(sqrt(x)) - 4, x) == \
        FiniteSet((-S.Half + sqrt(17)/2)**4)
    eq = sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))
    assert solveset_real(eq, x) == FiniteSet()
    eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
    ans = solveset_real(eq, x)
    ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
    rb = S(4)/5
    # only the real root rb should remain; ra is spurious/complex
    assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
        len(ans) == 2 and \
        set([i.n(chop=True) for i in ans]) == \
        set([i.n(chop=True) for i in (ra, rb)])
    assert solveset_real(sqrt(x) + x**Rational(1, 3) +
                         x**Rational(1, 4), x) == FiniteSet(0)
    assert solveset_real(x/sqrt(x**2 + 1), x) == FiniteSet(0)
    eq = (x - y**3)/((y**2)*sqrt(1 - y**2))
    assert solveset_real(eq, x) == FiniteSet(y**3)

    # issue 4497
    assert solveset_real(1/(5 + x)**(S(1)/5) - 9, x) == \
        FiniteSet(-295244/S(59049))
@XFAIL
def test_solve_sqrt_fail():
    """Known failure: checksol would need real_root to verify this candidate."""
    # this only works if we check real_root(eq.subs(x, S(1)/3))
    # but checksol doesn't work like that
    eq = (x**3 - 3*x**2)**Rational(1, 3) + 1 - x
    assert solveset_real(eq, x) == FiniteSet(S(1)/3)
@slow
def test_solve_sqrt_3():
    """A harder radical equation (complex roots) plus an unsolved ConditionSet."""
    R = Symbol('R')
    eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
    sol = solveset_complex(eq, R)
    assert sol == FiniteSet(*[S(5)/3 + 4*sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3,
        -sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 + 40*re(1/((-S(1)/2 -
        sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9 +
        sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 + S(5)/3 +
        I*(-sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3 -
        sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 + 40*im(1/((-S(1)/2 -
        sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9)])

    # the number of real roots will depend on the value of m: for m=1 there are 4
    # and for m=-1 there are none.
    eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
    unsolved_object = ConditionSet(q, Eq((-2*sqrt(4*q**2*(m - q)**2 +
        (-m + q)**2) + sqrt((-2*m**2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1) -
        1)**2 + (2*m**2 - 4*m - sqrt(4*m**4 - 4*m**2 + 8*m + 1) - 1)**2
        )*Abs(q))/Abs(q), 0), S.Reals)
    assert solveset_real(eq, q) == unsolved_object
def test_solve_polynomial_symbolic_param():
    """Quartic in x with a symbolic parameter a."""
    assert solveset_complex((x**2 - 1)**2 - a, x) == \
        FiniteSet(sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
                  sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a)))

    # By attempt to make Set.contains behave symbolically SetDifference on
    # FiniteSet isn't working very well.
    # Simple operations like `FiniteSet(a) - FiniteSet(-b)` raises `TypeError`
    # The likely course of action will making such operations return
    # SetDifference object. That will also change the expected output of
    # the given tests. Till the SetDifference becomes well behaving again the
    # following tests are kept as comments.

    # # issue 4508
    # assert solveset_complex(y - b*x/(a + x), x) == \
    #     FiniteSet(-a*y/(y - b))

    # # issue 4507
    # assert solveset_complex(y - b/(1 + a*x), x) == \
    #     FiniteSet((b - y)/(a*y))
def test_solve_rational():
    """Rational-function equations over the reals."""
    assert solveset_real(1/x + 1, x) == FiniteSet(-S.One)
    assert solveset_real(1/exp(x) - 1, x) == FiniteSet(0)
    assert solveset_real(x*(1 - 5/x), x) == FiniteSet(5)
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
    assert solveset_real((x**2/(7 - x)).diff(x), x) == \
        FiniteSet(S(0), S(14))
def test_solveset_real_gen_is_pow():
    """A constant nonzero expression (sqrt(1) + 1) has no solutions."""
    assert solveset_real(sqrt(1) + 1, x) == EmptySet()
def test_no_sol():
    """Equations with no real solution return the empty set."""
    assert solveset_real(4, x) == EmptySet()
    assert solveset_real(exp(x), x) == EmptySet()
    assert solveset_real(x**2 + 1, x) == EmptySet()
    assert solveset_real(-3*a/sqrt(x), x) == EmptySet()
    assert solveset_real(1/x, x) == EmptySet()
    assert solveset_real(-(1 + x)/(2 + x)**2 + 1/(2 + x), x) == \
        EmptySet()
def test_sol_zero_real():
    """The identically-zero equation is satisfied on the whole domain."""
    assert solveset_real(0, x) == S.Reals
    assert solveset(0, x, Interval(1, 2)) == Interval(1, 2)
    assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals
def test_no_sol_rational_extragenous():
    """Candidates extraneous to the rational equation's domain are rejected."""
    assert solveset_real((x/(x + 1) + 3)**(-2), x) == EmptySet()
    assert solveset_real((x - 1)/(1 + 1/(x - 1)), x) == EmptySet()
def test_solve_polynomial_cv_1a():
    """
    Test for solving on equations that can be converted to
    a polynomial equation using the change of variable y -> x**Rational(p, q)
    """
    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert solveset_real(x*(x**(S(1) / 3) - 3), x) == \
        FiniteSet(S(0), S(27))
def test_solveset_real_rational():
    """Test solveset_real for rational functions"""
    assert solveset_real((x - y**3) / ((y**2)*sqrt(1 - y**2)), x) \
        == FiniteSet(y**3)
    # issue 4486
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
def test_solveset_real_log():
    """log of a product: zeros of the argument minus one."""
    assert solveset_real(log((x-1)*(x+1)), x) == \
        FiniteSet(sqrt(2), -sqrt(2))
def test_poly_gens():
    """Exponential equation reducible to a polynomial in a power generator."""
    assert solveset_real(4**(2*(x**2) + 2*x) - 8, x) == \
        FiniteSet(-Rational(3, 2), S.Half)
@XFAIL
def test_uselogcombine_1():
    """Equations that need logcombine to solve (not implemented: XFAIL).

    BUGFIX: the third assertion previously omitted the symbol argument,
    so the call raised TypeError instead of exercising the solver; ``x``
    is now passed explicitly.
    """
    assert solveset_real(log(x - 3) + log(x + 3), x) == \
        FiniteSet(sqrt(10))
    assert solveset_real(log(x + 1) - log(2*x - 1), x) == FiniteSet(2)
    assert solveset_real(log(x + 3) + log(1 + 3/x) - 3, x) == FiniteSet(
        -3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
        -sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2)
@XFAIL
def test_uselogcombine_2():
    """Log-combination with symbolic parameters (not implemented: XFAIL)."""
    eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
    assert solveset_real(eq, x) == \
        FiniteSet(-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z))))
def test_solve_abs():
    """Equations and inequalities involving Abs."""
    assert solveset_real(Abs(x) - 2, x) == FiniteSet(-2, 2)
    assert solveset_real(Abs(x + 3) - 2*Abs(x - 3), x) == \
        FiniteSet(1, 9)
    assert solveset_real(2*Abs(x) - Abs(x - 1), x) == \
        FiniteSet(-1, Rational(1, 3))

    assert solveset_real(Abs(x - 7) - 8, x) == FiniteSet(-S(1), S(15))

    # issue 9565. Note: solveset_real does not solve this as it is
    # solveset's job to handle Relationals
    assert solveset(Abs((x - 1)/(x - 5)) <= S(1)/3, domain=S.Reals
                    ) == Interval(-1, 2)

    # issue #10069
    eq = abs(1/(x - 1)) - 1 > 0
    u = Union(Interval.open(0, 1), Interval.open(1, 2))
    assert solveset_real(eq, x) == u
    assert solveset(eq, x, domain=S.Reals) == u

    raises(ValueError, lambda: solveset(abs(x) - 1, x))
@XFAIL
def test_rewrite_trigh():
    """Hyperbolic equation requiring a sech rewrite (XFAIL)."""
    # if this import passes then the test below should also pass
    from sympy import sech
    assert solveset_real(sinh(x) + sech(x), x) == FiniteSet(
        2*atanh(-S.Half + sqrt(5)/2 - sqrt(-2*sqrt(5) + 2)/2),
        2*atanh(-S.Half + sqrt(5)/2 + sqrt(-2*sqrt(5) + 2)/2),
        2*atanh(-sqrt(5)/2 - S.Half + sqrt(2 + 2*sqrt(5))/2),
        2*atanh(-sqrt(2 + 2*sqrt(5))/2 - sqrt(5)/2 - S.Half))
def test_real_imag_splitting():
    """Real symbols let sqrt(a**2 - b**2) style equations split cleanly."""
    a, b = symbols('a b', real=True, finite=True)
    assert solveset_real(sqrt(a**2 - b**2) - 3, a) == \
        FiniteSet(-sqrt(b**2 + 9), sqrt(b**2 + 9))
    assert solveset_real(sqrt(a**2 + b**2) - 3, a) != \
        S.EmptySet
def test_units():
    """Equations carrying physical units solve through unchanged."""
    assert solveset_real(1/x - 1/(2*cm), x) == FiniteSet(2*cm)
def test_solve_only_exp_1():
    """Pure exponential equations, including exp(x) + exp(-x) forms."""
    y = Symbol('y', positive=True, finite=True)
    assert solveset_real(exp(x) - y, x) == FiniteSet(log(y))
    assert solveset_real(exp(x) + exp(-x) - 4, x) == \
        FiniteSet(log(-sqrt(3) + 2), log(sqrt(3) + 2))
    assert solveset_real(exp(x) + exp(-x) - y, x) != S.EmptySet
@XFAIL
def test_solve_only_exp_2():
    """Exponential equations not yet handled (XFAIL)."""
    assert solveset_real(exp(x/y)*exp(-z/y) - 2, y) == \
        FiniteSet((x - z)/log(2))
    assert solveset_real(sqrt(exp(x)) + sqrt(exp(-x)) - 4, x) == \
        FiniteSet(2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2))
def test_atan2():
    # The .inverse() method on atan2 works only if x.is_real is True and the
    # second argument is a real constant
    assert solveset_real(atan2(x, 2) - pi/3, x) == FiniteSet(2*sqrt(3))
def test_piecewise():
    """Piecewise expressions: branch-wise solving and interval solutions."""
    eq = Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3
    assert set(solveset_real(eq, x)) == set(FiniteSet(-1, 5))

    absxm3 = Piecewise(
        (x - 3, S(0) <= x - 3),
        (3 - x, S(0) > x - 3))
    y = Symbol('y', positive=True)
    assert solveset_real(absxm3 - y, x) == FiniteSet(-y + 3, y + 3)

    f = Piecewise(((x - 2)**2, x >= 0), (0, True))
    assert solveset(f, x, domain=S.Reals) == Union(FiniteSet(2), Interval(-oo, 0, True, True))

    assert solveset(Piecewise((x + 1, x > 0), (I, True)) - I, x) == \
        Interval(-oo, 0)
def test_solveset_complex_polynomial():
    """Quadratics and cubics over the complex field."""
    from sympy.abc import x, a, b, c
    assert solveset_complex(a*x**2 + b*x + c, x) == \
        FiniteSet(-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a),
                  -b/(2*a) + sqrt(-4*a*c + b**2)/(2*a))

    assert solveset_complex(x - y**3, y) == FiniteSet(
        (-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
        x**Rational(1, 3),
        (-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2)

    assert solveset_complex(x + 1/x - 1, x) == \
        FiniteSet(Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2)
def test_sol_zero_complex():
    """The zero equation is satisfied on the whole complex plane."""
    assert solveset_complex(0, x) == S.Complexes
def test_solveset_complex_rational():
    """Rational functions over the complex field; poles are excluded."""
    assert solveset_complex((x - 1)*(x - I)/(x - 3), x) == \
        FiniteSet(1, I)

    assert solveset_complex((x - y**3)/((y**2)*sqrt(1 - y**2)), x) == \
        FiniteSet(y**3)
    assert solveset_complex(-x**2 - I, x) == \
        FiniteSet(-sqrt(2)/2 + sqrt(2)*I/2, sqrt(2)/2 - sqrt(2)*I/2)
def test_solve_quintics():
    """Numerically verify quintic roots (skipped: everything after the
    skip() call is unreachable because skip raises)."""
    skip("This test is too slow")
    f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
    s = solveset_complex(f, x)
    for root in s:
        res = f.subs(x, root.n()).n()
        assert tn(res, 0)

    f = x**5 + 15*x + 12
    s = solveset_complex(f, x)
    for root in s:
        res = f.subs(x, root.n()).n()
        assert tn(res, 0)
def test_solveset_complex_exp():
    """Exponential equations whose complex solutions form image sets."""
    from sympy.abc import x, n
    assert solveset_complex(exp(x) - 1, x) == \
        imageset(Lambda(n, I*2*n*pi), S.Integers)
    assert solveset_complex(exp(x) - I, x) == \
        imageset(Lambda(n, I*(2*n*pi + pi/2)), S.Integers)
    assert solveset_complex(1/exp(x), x) == S.EmptySet
    assert solveset_complex(sinh(x).rewrite(exp), x) == \
        imageset(Lambda(n, n*pi*I), S.Integers)
def test_solve_complex_log():
    """Logarithmic equations over the complex field."""
    assert solveset_complex(log(x), x) == FiniteSet(1)
    assert solveset_complex(1 - log(a + 4*x**2), x) == \
        FiniteSet(-sqrt(-a/4 + E/4), sqrt(-a/4 + E/4))
def test_solve_complex_sqrt():
    """Radical equations over the complex field."""
    assert solveset_complex(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S(1), S(2))
    assert solveset_complex(sqrt(5*x + 6) - (2 + 2*I) - x, x) == \
        FiniteSet(-S(2), 3 - 4*I)
    assert solveset_complex(4*x*(1 - a * sqrt(x)), x) == \
        FiniteSet(S(0), 1 / a ** 2)
def test_solveset_complex_tan():
    """tan rewritten via exp: solutions minus the poles of tan."""
    s = solveset_complex(tan(x).rewrite(exp), x)
    assert s == imageset(Lambda(n, pi*n), S.Integers) - \
        imageset(Lambda(n, pi*n + pi/2), S.Integers)
def test_solve_trig():
    """Trigonometric equations: solutions are unions of image sets."""
    from sympy.abc import n
    assert solveset_real(sin(x), x) == \
        Union(imageset(Lambda(n, 2*pi*n), S.Integers),
              imageset(Lambda(n, 2*pi*n + pi), S.Integers))

    assert solveset_real(sin(x) - 1, x) == \
        imageset(Lambda(n, 2*pi*n + pi/2), S.Integers)

    assert solveset_real(cos(x), x) == \
        Union(imageset(Lambda(n, 2*pi*n - pi/2), S.Integers),
              imageset(Lambda(n, 2*pi*n + pi/2), S.Integers))

    assert solveset_real(sin(x) + cos(x), x) == \
        Union(imageset(Lambda(n, 2*n*pi - pi/4), S.Integers),
              imageset(Lambda(n, 2*n*pi + 3*pi/4), S.Integers))

    assert solveset_real(sin(x)**2 + cos(x)**2, x) == S.EmptySet

    assert solveset_complex(cos(x) - S.Half, x) == \
        Union(imageset(Lambda(n, 2*n*pi + pi/3), S.Integers),
              imageset(Lambda(n, 2*n*pi - pi/3), S.Integers))

    y, a = symbols('y,a')
    assert solveset(sin(y + a) - sin(y), a, domain=S.Reals) == \
        Union(imageset(Lambda(n, 2*n*pi), S.Integers),
              imageset(Lambda(n,
              -I*(I*(2*n*pi +arg(-exp(-2*I*y))) + 2*im(y))), S.Integers))
@XFAIL
def test_solve_trig_abs():
    """sin(Abs(x)) equation (not implemented: XFAIL)."""
    assert solveset(Eq(sin(Abs(x)), 1), x, domain=S.Reals) == \
        Union(ImageSet(Lambda(n, n*pi + (-1)**n*pi/2), S.Naturals0),
              ImageSet(Lambda(n, -n*pi - (-1)**n*pi/2), S.Naturals0))
def test_solve_invalid_sol():
    """Removable-singularity points must not appear in the solution set."""
    assert 0 not in solveset_real(sin(x)/x, x)
    assert 0 not in solveset_complex((exp(x) - 1)/x, x)
@XFAIL
def test_solve_trig_simplified():
    """Simplified single-ImageSet forms for trig solutions (XFAIL)."""
    from sympy.abc import n
    assert solveset_real(sin(x), x) == \
        imageset(Lambda(n, n*pi), S.Integers)

    assert solveset_real(cos(x), x) == \
        imageset(Lambda(n, n*pi + pi/2), S.Integers)

    assert solveset_real(cos(x) + sin(x), x) == \
        imageset(Lambda(n, n*pi - pi/4), S.Integers)
@XFAIL
def test_solve_lambert():
    """Equations whose closed form requires the Lambert W function.

    XFAIL: solveset does not implement Lambert-type inversion yet.
    BUGFIX: two calls below were missing the symbol argument (which makes
    them raise TypeError rather than exercise the solver); ``x`` is now
    passed explicitly in both.
    """
    assert solveset_real(x*exp(x) - 1, x) == FiniteSet(LambertW(1))
    assert solveset_real(x + 2**x, x) == \
        FiniteSet(-LambertW(log(2))/log(2))

    # issue 4739
    assert solveset_real(exp(log(5)*x) - 2**x, x) == FiniteSet(0)

    ans = solveset_real(3*x + 5 + 2**(-5*x + 3), x)
    assert ans == FiniteSet(-Rational(5, 3) +
                            LambertW(-10240*2**(S(1)/3)*log(2)/3)/(5*log(2)))

    eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
    result = solveset_real(eq, x)
    ans = FiniteSet((log(2401) +
                     5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))/-1)
    assert result == ans
    assert solveset_real(eq.expand(), x) == result

    assert solveset_real(5*x - 1 + 3*exp(2 - 7*x), x) == \
        FiniteSet(Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7)

    assert solveset_real(2*x + 5 + log(3*x - 2), x) == \
        FiniteSet(Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2)

    assert solveset_real(3*x + log(4*x), x) == \
        FiniteSet(LambertW(Rational(3, 4))/3)

    assert solveset_complex(x**z*y**z - 2, z) == \
        FiniteSet(log(2)/(log(x) + log(y)))

    # BUGFIX: the symbol argument was previously omitted here.
    assert solveset_real(x**x - 2, x) == FiniteSet(exp(LambertW(log(2))))

    a = Symbol('a')
    assert solveset_real(-a*x + 2*x*log(x), x) == FiniteSet(exp(a/2))
    a = Symbol('a', real=True)
    assert solveset_real(a/x + exp(x/2), x) == \
        FiniteSet(2*LambertW(-a/2))
    assert solveset_real((a/x + exp(x/2)).diff(x), x) == \
        FiniteSet(4*LambertW(sqrt(2)*sqrt(a)/4))

    assert solveset_real(1/(1/x - y + exp(y)), x) == EmptySet()
    # coverage test
    p = Symbol('p', positive=True)
    w = Symbol('w')
    assert solveset_real((1/p + 1)**(p + 1), p) == EmptySet()
    assert solveset_real(tanh(x + 3)*tanh(x - 3) - 1, x) == EmptySet()

    assert solveset_real(2*x**w - 4*y**w, w) == \
        solveset_real((x/y)**w - 2, w)

    assert solveset_real((x**2 - 2*x + 1).subs(x, log(x) + 3*x), x) == \
        FiniteSet(LambertW(3*S.Exp1)/3)
    assert solveset_real((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x) == \
        FiniteSet(LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3)
    assert solveset_real((x**2 - 2*x - 2).subs(x, log(x) + 3*x), x) == \
        FiniteSet(LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3)
    assert solveset_real(x*log(x) + 3*x + 1, x) == \
        FiniteSet(exp(-3 + LambertW(-exp(3))))
    eq = (x*exp(x) - 3).subs(x, x*exp(x))
    assert solveset_real(eq, x) == \
        FiniteSet(LambertW(3*exp(-LambertW(3))))

    assert solveset_real(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == \
        FiniteSet(-((log(a**5) + LambertW(S(1)/3))/(3*log(a))))
    p = symbols('p', positive=True)
    assert solveset_real(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == \
        FiniteSet(
        log((-3**(S(1)/3) - 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
        log((-3**(S(1)/3) + 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
        log((3*LambertW(S(1)/3)/p**5)**(1/(3*log(p)))),)  # checked numerically
    # check collection
    b = Symbol('b')
    eq = 3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5)
    assert solveset_real(eq, x) == FiniteSet(
        -((log(a**5) + LambertW(1/(b + 3)))/(3*log(a))))

    # issue 4271
    assert solveset_real((a/x + exp(x/2)).diff(x, 2), x) == FiniteSet(
        6*LambertW((-1)**(S(1)/3)*a**(S(1)/3)/3))

    assert solveset_real(x**3 - 3**x, x) == \
        FiniteSet(-3/log(3)*LambertW(-log(3)/3))

    assert solveset_real(x**2 - 2**x, x) == FiniteSet(2)
    assert solveset_real(-x**2 + 2**x, x) == FiniteSet(2)
    # BUGFIX: the symbol argument was previously omitted here.
    assert solveset_real(3**cos(x) - cos(x)**3, x) == FiniteSet(
        acos(-3*LambertW(-log(3)/3)/log(3)))

    assert solveset_real(4**(x/2) - 2**(x/3), x) == FiniteSet(0)
    assert solveset_real(5**(x/2) - 2**(x/3), x) == FiniteSet(0)
    b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
    assert solveset_real(5**(x/2) - 2**(3/x), x) == FiniteSet(-b, b)
def test_solveset():
    """The public solveset entry point: defaults, relationals, domains."""
    x = Symbol('x')
    raises(ValueError, lambda: solveset(x + y))
    raises(ValueError, lambda: solveset(x, 1))

    assert solveset(0, domain=S.Reals) == S.Reals
    assert solveset(1) == S.EmptySet
    assert solveset(True, domain=S.Reals) == S.Reals  # issue 10197
    assert solveset(False, domain=S.Reals) == S.EmptySet

    assert solveset(exp(x) - 1, domain=S.Reals) == FiniteSet(0)
    assert solveset(exp(x) - 1, x, S.Reals) == FiniteSet(0)
    assert solveset(Eq(exp(x), 1), x, S.Reals) == FiniteSet(0)

    assert solveset(x - 1 >= 0, x, S.Reals) == Interval(1, oo)
    assert solveset(exp(x) - 1 >= 0, x, S.Reals) == Interval(0, oo)

    assert solveset(exp(x) - 1, x) == imageset(Lambda(n, 2*I*pi*n), S.Integers)
    assert solveset(Eq(exp(x), 1), x) == imageset(Lambda(n, 2*I*pi*n),
                                                  S.Integers)
def test_conditionset():
    """Unsolved equations and inequalities come back as ConditionSet."""
    assert solveset(Eq(sin(x)**2 + cos(x)**2, 1), x, domain=S.Reals) == \
        ConditionSet(x, True, S.Reals)

    assert solveset(Eq(x**2 + x*sin(x), 1), x, domain=S.Reals) == \
        ConditionSet(x, Eq(x*(x + sin(x)) - 1, 0), S.Reals)

    assert solveset(Eq(sin(Abs(x)), x), x, domain=S.Reals) == \
        ConditionSet(x, Eq(-x + sin(Abs(x)), 0), Interval(-oo, oo))

    assert solveset(Eq(-I*(exp(I*x) - exp(-I*x))/2, 1), x) == \
        imageset(Lambda(n, 2*n*pi + pi/2), S.Integers)

    assert solveset(x + sin(x) > 1, x, domain=S.Reals) == \
        ConditionSet(x, x + sin(x) > 1, S.Reals)
@XFAIL
def test_conditionset_equality():
    ''' Checking equality of different representations of ConditionSet'''
    assert solveset(Eq(tan(x), y), x) == ConditionSet(x, Eq(tan(x), y), S.Complexes)
def test_solveset_domain():
    """Restricting the domain filters out solutions outside it."""
    x = Symbol('x')
    assert solveset(x**2 - x - 6, x, Interval(0, oo)) == FiniteSet(3)
    assert solveset(x**2 - 1, x, Interval(0, oo)) == FiniteSet(1)
    assert solveset(x**4 - 16, x, Interval(0, 10)) == FiniteSet(2)
def test_improve_coverage():
    """Extra cases for coverage: unsolved exp equation and _has_rational_power."""
    from sympy.solvers.solveset import _has_rational_power
    x = Symbol('x')
    y = exp(x+1/x**2)
    solution = solveset(y**2+y, x, S.Reals)
    unsolved_object = ConditionSet(x, Eq((exp((x**3 + 1)/x**2) + 1)*exp((x**3 + 1)/x**2), 0), S.Reals)
    assert solution == unsolved_object

    assert _has_rational_power(sin(x)*exp(x) + 1, x) == (False, S.One)
    assert _has_rational_power((sin(x)**2)*(exp(x) + 1)**3, x) == (False, S.One)
def test_issue_9522():
    """Equations whose two sides differ only by an undefined term are empty."""
    x = Symbol('x')
    expr1 = Eq(1/(x**2 - 4) + x, 1/(x**2 - 4) + 2)
    expr2 = Eq(1/x + x, 1/x)

    assert solveset(expr1, x, S.Reals) == EmptySet()
    assert solveset(expr2, x, S.Reals) == EmptySet()
def test_linear_eq_to_matrix():
    """linear_eq_to_matrix converts a linear system (expressions or Eq
    objects) into the (A, b) matrix pair; symbols must be supplied."""
    x, y, z = symbols('x, y, z')
    eqns1 = [2*x + y - 2*z - 3, x - y - z, x + y + 3*z - 12]
    eqns2 = [Eq(3*x + 2*y - z, 1), Eq(2*x - 2*y + 4*z, -2), -2*x + y - 2*z]
    A, b = linear_eq_to_matrix(eqns1, x, y, z)
    assert A == Matrix([[2, 1, -2], [1, -1, -1], [1, 1, 3]])
    assert b == Matrix([[3], [0], [12]])
    A, b = linear_eq_to_matrix(eqns2, x, y, z)
    assert A == Matrix([[3, 2, -1], [2, -2, 4], [-2, 1, -2]])
    assert b == Matrix([[1], [-2], [0]])
    # Pure symbolic coefficients
    from sympy.abc import a, b, c, d, e, f, g, h, i, j, k, l
    eqns3 = [a*x + b*y + c*z - d, e*x + f*y + g*z - h, i*x + j*y + k*z - l]
    A, B = linear_eq_to_matrix(eqns3, x, y, z)
    assert A == Matrix([[a, b, c], [e, f, g], [i, j, k]])
    assert B == Matrix([[d], [h], [l]])
    # raise ValueError if no symbols are given
    raises(ValueError, lambda: linear_eq_to_matrix(eqns3))
def test_linsolve():
    """linsolve accepts an augmented matrix, a list of equations, or an
    (A, b) tuple, and returns the (possibly parametric) solution set."""
    x, y, z, u, v, w = symbols("x, y, z, u, v, w")
    x1, x2, x3, x4 = symbols('x1, x2, x3, x4')

    # Test for different input forms
    M = Matrix([[1, 2, 1, 1, 7], [1, 2, 2, -1, 12], [2, 4, 0, 6, 4]])
    system1 = A, b = M[:, :-1], M[:, -1]
    Eqns = [x1 + 2*x2 + x3 + x4 - 7, x1 + 2*x2 + 2*x3 - x4 - 12,
            2*x1 + 4*x2 + 6*x4 - 4]

    # Underdetermined system: free variables appear in the solution tuple.
    sol = FiniteSet((-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))
    assert linsolve(M, (x1, x2, x3, x4)) == sol
    assert linsolve(Eqns, (x1, x2, x3, x4)) == sol
    assert linsolve(system1, (x1, x2, x3, x4)) == sol

    # raise ValueError if no symbols are given
    raises(ValueError, lambda: linsolve(system1))
    # raise ValueError if, A & b is not given as tuple
    raises(ValueError, lambda: linsolve(A, b, x1, x2, x3, x4))
    # raise ValueError for garbage value
    raises(ValueError, lambda: linsolve(Eqns[0], x1, x2, x3, x4))

    # Fully symbolic test
    a, b, c, d, e, f = symbols('a, b, c, d, e, f')
    A = Matrix([[a, b], [c, d]])
    B = Matrix([[e], [f]])
    system2 = (A, B)
    sol = FiniteSet(((-b*f + d*e)/(a*d - b*c), (a*f - c*e)/(a*d - b*c)))
    assert linsolve(system2, [x, y]) == sol

    # Test for Dummy Symbols issue #9667
    x1 = Dummy('x1')
    x2 = Dummy('x2')
    x3 = Dummy('x3')
    x4 = Dummy('x4')
    assert linsolve(system1, x1, x2, x3, x4) == FiniteSet((-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))

    # No solution
    A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
    b = Matrix([0, 0, 1])
    assert linsolve((A, b), (x, y, z)) == EmptySet()

    # Issue #10056
    A, B, J1, J2 = symbols('A B J1 J2')
    Augmatrix = Matrix([
        [2*I*J1, 2*I*J2, -2/J1],
        [-2*I*J2, -2*I*J1, 2/J2],
        [0, 2, 2*I/(J1*J2)],
        [2, 0, 0],
        ])
    assert linsolve(Augmatrix, A, B) == FiniteSet((0, I/(J1*J2)))

    # Issue #10121 - Assignment of free variables
    Augmatrix = Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]])
    a, b, c, d, e = symbols('a, b, c, d, e')
    assert linsolve(Augmatrix, a, b, c, d, e) == FiniteSet((a, 0, c, 0, e))
def test_issue_9556():
    """Regression test for sympy issue #9556: Abs(x) + positive is never 0."""
    x = Symbol('x')
    b = Symbol('b', positive=True)
    assert solveset(Abs(x) + 1, x, S.Reals) == EmptySet()
    assert solveset(Abs(x) + b, x, S.Reals) == EmptySet()
    assert solveset(Eq(b, -1), b, S.Reals) == EmptySet()
def test_issue_9611():
    """Regression test for sympy issue #9611: identities solve to the
    whole domain (Reals when restricted, Complexes by default)."""
    x = Symbol('x')
    a = Symbol('a')
    y = Symbol('y')
    assert solveset(Eq(x - x + a, a), x, S.Reals) == S.Reals
    assert solveset(Eq(y - y + a, a), y) == S.Complexes
def test_issue_9557():
    """Regression test for sympy issue #9557: with a free parameter the
    real solutions stay wrapped in an Intersection with S.Reals."""
    x = Symbol('x')
    a = Symbol('a')
    assert solveset(x**2 + a, x, S.Reals) == Intersection(S.Reals,
        FiniteSet(-sqrt(-a), sqrt(-a)))
def test_issue_9778():
    """Regression test for sympy issue #9778: real solutions of odd and
    fractional powers."""
    assert solveset(x**3 + 1, x, S.Reals) == FiniteSet(-1)
    assert solveset(x**(S(3)/5) + 1, x, S.Reals) == S.EmptySet
    assert solveset(x**3 + y, x, S.Reals) == Intersection(Interval(-oo, oo), \
        FiniteSet((-y)**(S(1)/3)*Piecewise((1, Ne(-im(y), 0)), ((-1)**(S(2)/3), -y < 0), (1, True))))
@XFAIL
def test_issue_failing_pow():
    """Known failure: x**(3/2) + 4 should have no real solution."""
    assert solveset(x**(S(3)/2) + 4, x, S.Reals) == S.EmptySet
def test_issue_9849():
    """Regression test for sympy issue #9849: Abs(sin(x)) + 1 is never 0."""
    assert solveset(Abs(sin(x)) + 1, x, S.Reals) == S.EmptySet
def test_issue_9953():
    """Regression test for sympy issue #9953: linsolve of an empty system."""
    assert linsolve([ ], x) == S.EmptySet
def test_issue_9913():
    """Regression test for sympy issue #9913: real root of a rational
    equation via the cubic formula."""
    assert solveset(2*x + 1/(x - 10)**2, x, S.Reals) == \
        FiniteSet(-(3*sqrt(24081)/4 + S(4027)/4)**(S(1)/3)/3 - 100/
                (3*(3*sqrt(24081)/4 + S(4027)/4)**(S(1)/3)) + S(20)/3)
def test_issue_10397():
    """Regression test for sympy issue #10397: sqrt(x) = 0 over Complexes."""
    assert solveset(sqrt(x), x, S.Complexes) == FiniteSet(0)
def test_simplification():
    """The coefficient (a - b)/(-2*a + 2*b) must simplify to -1/2 so the
    solution is exactly 1/2 in both the complex and real domains."""
    eq = x + (a - b)/(-2*a + 2*b)
    assert solveset(eq, x) == FiniteSet(S.Half)
    assert solveset(eq, x, S.Reals) == FiniteSet(S.Half)
def test_issue_10555():
    """Regression test for sympy issue #10555: an undefined function yields
    a ConditionSet rather than an error."""
    f = Function('f')
    assert solveset(f(x) - pi/2, x, S.Reals) == \
        ConditionSet(x, Eq(2*f(x) - pi, 0), S.Reals)
def test_issue_8715():
    """Regression test for sympy issue #8715: the 1/x terms cancel but x=0
    (and after substitution x=1) must stay excluded from the solution."""
    eq = x + 1/x > -2 + 1/x
    assert solveset(eq, x, S.Reals) == \
        (Interval.open(-2, oo) - FiniteSet(0))
    assert solveset(eq.subs(x,log(x)), x, S.Reals) == \
        Interval.open(exp(-2), oo) - FiniteSet(1)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:ElectrumTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_ELECTRUMD' not in vars():
    ENABLE_ELECTRUMD=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0

# Coverage is off unless --coverage is seen below.
ENABLE_COVERAGE=0

#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")

# ANSI escapes for bold test names; only on POSIX terminals.
bold = ("","")
if (os.name == 'posix'):
    bold = ('\033[0m', '\033[1m')

# Split argv: '--*' and '-h' are forwarded to the test scripts via passOn;
# everything else (script names, -win, -extended) is consumed by this runner.
for arg in sys.argv[1:]:
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif (p.match(arg) or arg == "-h"):
        passOn += " " + arg
    else:
        opts.add(arg)

#Set env vars
buildDir = BUILDDIR
if "ELECTRUMD" not in os.environ:
    os.environ["ELECTRUMD"] = buildDir + '/src/electrumd' + EXEEXT
if "ELECTRUMCLI" not in os.environ:
    os.environ["ELECTRUMCLI"] = buildDir + '/src/electrum-cli' + EXEEXT

if EXEEXT == ".exe" and "-win" not in opts:
    # https://github.com/electrum/electrum/commit/d52802551752140cf41f0d9a225a43e84404d3e9
    # https://github.com/electrum/electrum/pull/5677#issuecomment-136646964
    print("Win tests currently disabled by default.  Use -win option to enable")
    sys.exit(0)

if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_ELECTRUMD == 1):
    print("No rpc tests to run. Wallet, utils, and electrumd must all be enabled")
    sys.exit(0)

# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError as e:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
            "to run zmq tests, see dependency info in /qa/README.md.")
        raise e
#Tests
# Basic suite: run by default (subject to command-line selection).
testScripts = [
    'bip68-112-113-p2p.py',
    'wallet.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_limit.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'signrawtransactions.py',
    'walletbackup.py',
    'nodehandling.py',
    'reindex.py',
    'decodescript.py',
    'p2p-fullblocktest.py',
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py',
    'keypool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py',
    'invalidtxrequest.py',
    'abandonconflict.py',
    'p2p-versionbits-warning.py',
]
# The zmq test needs python3-zmq; only scheduled when zmq support is built.
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')

# Extended suite: long-running tests, only run with -extended (or by name).
testScriptsExt = [
    'bip9-softforks.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py',
    'bip68-sequence.py',
    'bipdersig-p2p.py',
    'bipdersig.py',
    'getblocktemplate_longpoll.py',
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    'pruning.py',
    'forknotify.py',
    'invalidateblock.py',
#    'rpcbind_test.py', #temporary, bug in libevent, see #6655
    'smartfees.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py',
    'mempool_packages.py',
    'maxuploadtarget.py',
    'replace-by-fee.py',
]
def runtests():
    """Run the selected RPC test scripts, then the extended suite if requested.

    Selection rules (per script): run it when no scripts were named on the
    command line, when only ``-win`` was given, when ``-extended`` was given,
    or when the script was named (with or without its ``.py`` suffix).
    Prints each script's duration; prints an RPC coverage report at the end
    when ``--coverage`` was given.  Exits early after the first script when
    help (``-h``/``--help``) is being forwarded, so only one usage text prints.
    """
    coverage = None

    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)

    rpcTestDir = buildDir + '/qa/rpc-tests/'
    run_extended = '-extended' in opts
    cov_flag = coverage.flag if coverage else ''
    flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)

    # Compile once, outside the loop (the original rebuilt this regex on
    # every iteration).
    help_pattern = re.compile(" -h| --help")

    # Run the basic suite; iterate the scripts directly instead of
    # range(len(...)) indexing.
    for script in testScripts:
        if (len(opts) == 0
                or (len(opts) == 1 and "-win" in opts)
                or run_extended
                or script in opts
                or re.sub(".py$", "", script) in opts):
            print("Running testscript %s%s%s ..." % (bold[1], script, bold[0]))
            time0 = time.time()
            subprocess.check_call(rpcTestDir + script + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - time0)))

            # exit if help is called so we print just one set of
            # instructions
            if help_pattern.match(passOn):
                sys.exit(0)

    # Run Extended Tests
    for script in testScriptsExt:
        if (run_extended or script in opts
                or re.sub(".py$", "", script) in opts):
            print(
                "Running 2nd level testscript "
                + "%s%s%s ..." % (bold[1], script, bold[0]))
            time0 = time.time()
            subprocess.check_call(rpcTestDir + script + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Each test-script subprocess writes "coverage.*" files into a shared
    temporary directory, listing the RPC commands it invoked; the directory
    also holds the full command list per `electrum-cli help`
    (`rpc_interface.txt`).  After all tests finish, the union of invoked
    commands is diffed against the full list to report uncovered commands.

    See also: qa/rpc-tests/test_framework/coverage.py
    """
    def __init__(self):
        # Fresh scratch directory; its path is handed to test scripts via
        # the --coveragedir flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir %s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.

        """
        uncovered = self._get_uncovered_rpc_commands()

        if not uncovered:
            print("All RPC commands covered.")
            return

        print("Uncovered RPC commands:")
        print("".join((" - %s\n" % cmd) for cmd in sorted(uncovered)))

    def cleanup(self):
        # Remove the scratch directory and everything written into it.
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.

        """
        # Filenames shared with `qa/rpc-tests/test-framework/coverage.py`.
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r') as reference_file:
            all_cmds = set(line.strip() for line in reference_file)

        covered_cmds = set()
        for root, _dirs, files in os.walk(self.dir):
            for basename in files:
                if not basename.startswith(coverage_file_prefix):
                    continue
                with open(os.path.join(root, basename), 'r') as cov_file:
                    covered_cmds.update(line.strip() for line in cov_file)

        return all_cmds - covered_cmds
# Script entry point: run the configured test suites.
if __name__ == '__main__':
    runtests()
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import urllib
import time
from urllib import unquote
from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
from swift.common import utils, exceptions
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, HTTPPreconditionFailed, HTTPNotImplemented, \
HTTPException
# Hard default limits for Swift requests.  Each of these may be overridden
# in the [swift-constraints] section of swift.conf (see reload_constraints).
MAX_FILE_SIZE = 5368709122  # 5 GiB + 2 bytes
MAX_META_NAME_LENGTH = 128
MAX_META_VALUE_LENGTH = 256
MAX_META_COUNT = 90
MAX_META_OVERALL_SIZE = 4096
MAX_HEADER_SIZE = 8192
MAX_OBJECT_NAME_LENGTH = 1024
CONTAINER_LISTING_LIMIT = 10000
ACCOUNT_LISTING_LIMIT = 10000
MAX_ACCOUNT_NAME_LENGTH = 256
MAX_CONTAINER_NAME_LENGTH = 256
VALID_API_VERSIONS = ["v1", "v1.0"]
EXTRA_HEADER_COUNT = 0

# If adding an entry to DEFAULT_CONSTRAINTS, note that
# these constraints are automatically published by the
# proxy server in responses to /info requests, with values
# updated by reload_constraints()
DEFAULT_CONSTRAINTS = {
    'max_file_size': MAX_FILE_SIZE,
    'max_meta_name_length': MAX_META_NAME_LENGTH,
    'max_meta_value_length': MAX_META_VALUE_LENGTH,
    'max_meta_count': MAX_META_COUNT,
    'max_meta_overall_size': MAX_META_OVERALL_SIZE,
    'max_header_size': MAX_HEADER_SIZE,
    'max_object_name_length': MAX_OBJECT_NAME_LENGTH,
    'container_listing_limit': CONTAINER_LISTING_LIMIT,
    'account_listing_limit': ACCOUNT_LISTING_LIMIT,
    'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH,
    'max_container_name_length': MAX_CONTAINER_NAME_LENGTH,
    'valid_api_versions': VALID_API_VERSIONS,
    'extra_header_count': EXTRA_HEADER_COUNT,
}

SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {}  # any constraints overridden by SWIFT_CONF_FILE
EFFECTIVE_CONSTRAINTS = {}  # populated by reload_constraints
def reload_constraints():
    """
    Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
    populating OVERRIDE_CONSTRAINTS AND EFFECTIVE_CONSTRAINTS along the way.
    """
    global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
    SWIFT_CONSTRAINTS_LOADED = False
    OVERRIDE_CONSTRAINTS = {}
    constraints_conf = ConfigParser()
    if constraints_conf.read(utils.SWIFT_CONF_FILE):
        SWIFT_CONSTRAINTS_LOADED = True
        for name in DEFAULT_CONSTRAINTS:
            try:
                value = constraints_conf.get('swift-constraints', name)
            except NoOptionError:
                # Not overridden in the conf file; keep the default.
                pass
            except NoSectionError:
                # We are never going to find the section for another option
                break
            else:
                # Prefer an int; otherwise treat as a CSV list (e.g. the
                # valid_api_versions constraint).
                try:
                    value = int(value)
                except ValueError:
                    value = utils.list_from_csv(value)
                OVERRIDE_CONSTRAINTS[name] = value
    for name, default in DEFAULT_CONSTRAINTS.items():
        value = OVERRIDE_CONSTRAINTS.get(name, default)
        EFFECTIVE_CONSTRAINTS[name] = value
        # "globals" in this context is module level globals, always.
        globals()[name.upper()] = value
# Apply any swift.conf overrides once, at import time.
reload_constraints()

# Maximum slo segments in buffer
MAX_BUFFERED_SLO_SEGMENTS = 10000

#: Query string format= values to their corresponding content-type values
FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',
                       'xml': 'application/xml'}

# By default the maximum number of allowed headers depends on the number of max
# allowed metadata settings plus a default value of 32 for regular http
# headers. If for some reason this is not enough (custom middleware for
# example) it can be increased with the extra_header_count constraint.
MAX_HEADER_COUNT = MAX_META_COUNT + 32 + max(EXTRA_HEADER_COUNT, 0)
def check_metadata(req, target_type):
    """
    Check metadata sent in the request headers.  This should only check
    that the metadata in the request given is valid.  Checks against
    account/container overall metadata should be forwarded on to its
    respective server to be checked.

    :param req: request object
    :param target_type: str: one of: object, container, or account: indicates
                        which type the target storage for the metadata is
    :returns: HTTPBadRequest with bad metadata otherwise None
    """
    prefix = 'x-%s-meta-' % target_type.lower()
    meta_count = 0
    meta_size = 0
    for key, value in req.headers.items():
        # NOTE: basestring is Python 2 only; header values may be str or
        # unicode here.
        if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
            return HTTPBadRequest(body='Header value too long: %s' %
                                  key[:MAX_META_NAME_LENGTH],
                                  request=req, content_type='text/plain')
        if not key.lower().startswith(prefix):
            continue
        # Validate only the user-supplied part of the metadata header name.
        key = key[len(prefix):]
        if not key:
            return HTTPBadRequest(body='Metadata name cannot be empty',
                                  request=req, content_type='text/plain')
        meta_count += 1
        meta_size += len(key) + len(value)
        if len(key) > MAX_META_NAME_LENGTH:
            return HTTPBadRequest(
                body='Metadata name too long: %s%s' % (prefix, key),
                request=req, content_type='text/plain')
        elif len(value) > MAX_META_VALUE_LENGTH:
            return HTTPBadRequest(
                body='Metadata value longer than %d: %s%s' % (
                    MAX_META_VALUE_LENGTH, prefix, key),
                request=req, content_type='text/plain')
        elif meta_count > MAX_META_COUNT:
            return HTTPBadRequest(
                body='Too many metadata items; max %d' % MAX_META_COUNT,
                request=req, content_type='text/plain')
        elif meta_size > MAX_META_OVERALL_SIZE:
            return HTTPBadRequest(
                body='Total metadata too large; max %d'
                % MAX_META_OVERALL_SIZE,
                request=req, content_type='text/plain')
    return None
def check_object_creation(req, object_name):
    """
    Check to ensure that everything is alright about an object to be created.

    :param req: HTTP request object
    :param object_name: name of object to be created
    :returns HTTPRequestEntityTooLarge: the object is too large
    :returns HTTPLengthRequired: missing content-length header and not
                                 a chunked request
    :returns HTTPBadRequest: missing or bad content-type header, or
                             bad metadata
    :returns HTTPNotImplemented: unsupported transfer-encoding header value
    """
    try:
        ml = req.message_length()
    except ValueError as e:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body=str(e))
    except AttributeError as e:
        # message_length raises AttributeError for an unsupported
        # transfer-encoding value.
        return HTTPNotImplemented(request=req, content_type='text/plain',
                                  body=str(e))
    if ml is not None and ml > MAX_FILE_SIZE:
        return HTTPRequestEntityTooLarge(body='Your request is too large.',
                                         request=req,
                                         content_type='text/plain')
    if req.content_length is None and \
            req.headers.get('transfer-encoding') != 'chunked':
        return HTTPLengthRequired(body='Missing Content-Length header.',
                                  request=req,
                                  content_type='text/plain')

    if 'X-Copy-From' in req.headers and req.content_length:
        return HTTPBadRequest(body='Copy requests require a zero byte body',
                              request=req, content_type='text/plain')

    if len(object_name) > MAX_OBJECT_NAME_LENGTH:
        return HTTPBadRequest(body='Object name length of %d longer than %d' %
                              (len(object_name), MAX_OBJECT_NAME_LENGTH),
                              request=req, content_type='text/plain')

    if 'Content-Type' not in req.headers:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='No content type')

    # Convert any raised HTTPException into the returned-response style
    # used by the rest of this function.
    try:
        req = check_delete_headers(req)
    except HTTPException as e:
        return HTTPBadRequest(request=req, body=e.body,
                              content_type='text/plain')

    if not check_utf8(req.headers['Content-Type']):
        return HTTPBadRequest(request=req, body='Invalid Content-Type',
                              content_type='text/plain')
    return check_metadata(req, 'object')
def check_dir(root, drive):
    """
    Verify that the path to the device is a directory and is a lesser
    constraint that is enforced when a full mount_check isn't possible
    with, for instance, a VM using loopback or partitions.

    :param root: base path where the dir is
    :param drive: drive name to be checked
    :returns: True if it is a valid directory, False otherwise
    """
    device_path = os.path.join(root, drive)
    return os.path.isdir(device_path)
def check_mount(root, drive):
    """
    Verify that the path to the device is a mount point and mounted.  This
    allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us for accidentally filling up the root
    partition.

    :param root: base path where the devices are mounted
    :param drive: drive name to be checked
    :returns: True if it is a valid mounted device, False otherwise
    """
    # Reject drive names that would change under URL quoting (spaces,
    # slashes, etc.) -- they can't be addressed safely in request paths.
    # NOTE: urllib.quote_plus is the Python 2 location of this function.
    if not (urllib.quote_plus(drive) == drive):
        return False
    path = os.path.join(root, drive)
    return utils.ismount(path)
def check_float(string):
    """
    Helper function for checking if a string can be converted to a float.

    :param string: string to be verified as a float
    :returns: True if the string can be converted to a float, False otherwise
    """
    try:
        float(string)
    except ValueError:
        return False
    return True
def valid_timestamp(request):
    """
    Helper function to extract a timestamp from requests that require one.

    :param request: the swob request object
    :returns: a valid Timestamp instance
    :raises: HTTPBadRequest on missing or invalid X-Timestamp
    """
    try:
        return request.timestamp
    except exceptions.InvalidTimestamp as e:
        # Translate the parsing error into a client-facing 400.
        raise HTTPBadRequest(body=str(e), request=request,
                             content_type='text/plain')
def check_delete_headers(request):
    """
    Validate that any 'x-delete' headers have correct values:
    values should be positive integers and correspond to
    a time in the future.

    :param request: the swob request object
    :returns: the request object, with x-delete-at normalized/derived
    :raises: HTTPBadRequest on invalid values
    """
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        # Convert the relative offset into an absolute x-delete-at time.
        actual_del_time = time.time() + x_delete_after
        if actual_del_time < time.time():
            # Only possible for a negative X-Delete-After value.
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        request.headers['x-delete-at'] = utils.normalize_delete_at_timestamp(
            actual_del_time)

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(utils.normalize_delete_at_timestamp(
                int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request, content_type='text/plain',
                                 body='Non-integer X-Delete-At')

        if x_delete_at < time.time():
            raise HTTPBadRequest(request=request, content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
def check_utf8(string):
    """
    Validate if a string is valid UTF-8 str or unicode and that it
    does not contain any null character.

    :param string: string to be validated
    :returns: True if the string is valid utf-8 str or unicode and
              contains no null characters, False otherwise
    """
    if not string:
        return False
    try:
        # NOTE: `unicode` is Python 2 only; str values are raw bytes here.
        if isinstance(string, unicode):
            string.encode('utf-8')
        else:
            decoded = string.decode('UTF-8')
            if decoded.encode('UTF-8') != string:
                # Reject non-canonical encodings (e.g. overlong sequences).
                return False
            # A UTF-8 string with surrogates in it is invalid.
            if any(0xD800 <= ord(codepoint) <= 0xDFFF
                   for codepoint in decoded):
                return False
        return '\x00' not in string
    # If string is unicode, decode() will raise UnicodeEncodeError
    # So, we should catch both UnicodeDecodeError & UnicodeEncodeError
    except UnicodeError:
        return False
def check_path_header(req, name, length, error_msg):
    """
    Validate that the value of path-like header is
    well formatted. We assume the caller ensures that
    specific header is present in req.headers.

    :param req: HTTP request object
    :param name: header name
    :param length: length of path segment check
    :param error_msg: error message for client
    :returns: A tuple with path parts according to length
    :raise: HTTPPreconditionFailed if header value
            is not well formatted.
    """
    src_header = unquote(req.headers.get(name))
    # Normalize to an absolute path so split_path sees a leading slash.
    if not src_header.startswith('/'):
        src_header = '/' + src_header
    try:
        return utils.split_path(src_header, length, length, True)
    except ValueError:
        raise HTTPPreconditionFailed(
            request=req,
            body=error_msg)
def check_copy_from_header(req):
    """
    Validate that the value from x-copy-from header is
    well formatted. We assume the caller ensures that
    x-copy-from header is present in req.headers.

    :param req: HTTP request object
    :returns: A tuple with container name and object name
    :raise: HTTPPreconditionFailed if x-copy-from value
            is not well formatted.
    """
    error_msg = ('X-Copy-From header must be of the form '
                 '<container name>/<object name>')
    return check_path_header(req, 'X-Copy-From', 2, error_msg)
def check_destination_header(req):
    """
    Validate that the value from destination header is
    well formatted. We assume the caller ensures that
    destination header is present in req.headers.

    :param req: HTTP request object
    :returns: A tuple with container name and object name
    :raise: HTTPPreconditionFailed if destination value
            is not well formatted.
    """
    error_msg = ('Destination header must be of the form '
                 '<container name>/<object name>')
    return check_path_header(req, 'Destination', 2, error_msg)
def check_name_format(req, name, target_type):
    """
    Validate that the header contains valid account or container name.

    :param req: HTTP request object
    :param name: header value to validate
    :param target_type: which header is being validated (Account or Container)
    :returns: A properly encoded account name or container name
    :raise: HTTPPreconditionFailed if account header
            is not well formatted.
    """
    if not name:
        raise HTTPPreconditionFailed(
            request=req,
            body='%s name cannot be empty' % target_type)
    # NOTE: `unicode` is Python 2 only; names are returned UTF-8 encoded.
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    if '/' in name:
        raise HTTPPreconditionFailed(
            request=req,
            body='%s name cannot contain slashes' % target_type)
    return name
# Convenience wrappers that pin target_type for the two common cases.
check_account_format = functools.partial(check_name_format,
                                         target_type='Account')
check_container_format = functools.partial(check_name_format,
                                           target_type='Container')
def valid_api_version(version):
    """ Checks if the requested version is valid.

    Currently Swift only supports "v1" and "v1.0". """
    global VALID_API_VERSIONS
    # A swift.conf override may have left a single scalar here; normalize
    # it to a one-element list so membership testing works.
    if not isinstance(VALID_API_VERSIONS, list):
        VALID_API_VERSIONS = [str(VALID_API_VERSIONS)]
    return version in VALID_API_VERSIONS
| |
#-*- coding: utf-8 -*-
import sys
from astropy.coordinates import SkyCoord
from astropy import units as u
class HourAngle(object):
    """Sexagesimal hour angle: hours, minutes and (fractional) seconds."""

    def __init__(self, hh=None, mm=None, ss=None):
        self.hh = hh
        self.mm = mm
        self.ss = ss

    def from_str(self, s):
        """Parse a whitespace-separated 'HH MM SS.ss' string in place."""
        # Split into at most three fields; the seconds field keeps any
        # trailing text so malformed input still fails in float().
        fields = s.split(None, 2)
        self.hh = int(fields[0])
        self.mm = int(fields[1])
        self.ss = float(fields[2])

    def __str__(self):
        # Sign-aware, zero-padded rendering, e.g. '  01:02:03.50'.
        return ' {: 03d}:{:02d}:{:05.2f}'.format(self.hh, self.mm, self.ss)
class Cluster(object):
    """In-memory record for one globular cluster, merged from the three
    fixed-width catalogue data files (f1: identification/coordinates,
    f2: metallicity/photometry, f3: velocities/structural parameters)."""
    def __init__(self):
        # General
        self.gid = ''                 # Cluster identification
        self.name = None              # Other commonly used cluster name
        # Coordinates
        self.ra = None                # Right ascension (Epoch J2000)
        self.dec = None               # Declination (Epoch J2000)
        self.gallon = None            # Galactic longitude (Degrees)
        self.gallat = None            # Galactic latitude (Degrees)
        self.dist_from_sun = None     # Distance from sum (kpc)
        # Metallicity
        self.metallicity = None       # Metallicity [Fe/H]
        self.w_mean_met = None        # Weight of mean metallicity
        # Photometry
        self.m_v_t = None             # Cluster luminosity, M_V,t = V_t - (m-M)V
        self.ph_u_b = None            # U-B
        self.ph_b_v = None            # B-V
        self.ph_v_r = None            # V-R
        self.ph_v_i = None            # V-i
        self.ellipticity = None       # Projected ellipticity of isophotes, e = 1-(b/a)
        # Velocities
        self.v_r = None               # Heliocentric radial velocity (km/s)
        self.v_r_err = None           # Observational uncertainty in radial velocity
        self.sig_v = None             # Central velocity dispersion (km/s)
        self.sig_err = None           # Observational uncertainty in velocity dispersion
        # Structural parameters
        self.sp_c = None              # King-model central concentration, c=log(r_t/r_c)
        self.sp_r_c = None            # Core radius (arcmin)
        self.sp_r_h = None            # Half-light radius (arcmin)
        self.sp_mu_V = None           # Central surface brightness (V magnitudes per arcsec^2)
        self.sp_rho_0 = None          # Central luminosity density, log_10(solar_lum/pc^3)

    def fill_in(self, do):
        """Copy every parsed attribute onto *do* (a Django Observation-like
        object), skipping fields that were not present in the input.

        NOTE(review): truthiness is used as the "was parsed" test, so a
        value that is exactly 0 (or 0.0) is treated as missing -- confirm
        this is acceptable for fields like ra/gallat/metallicity.
        """
        if self.name:
            do.name = self.name
        if self.ra:
            do.ra = self.ra
            do.dec = self.dec
        if self.gallon:
            do.gallon = self.gallon
            do.gallat = self.gallat
        if self.dist_from_sun:
            do.dfs = self.dist_from_sun
        if self.metallicity:
            do.metallicity = self.metallicity
        if self.w_mean_met:
            do.w_mean_met = self.w_mean_met
        if self.m_v_t:
            do.m_v_t = self.m_v_t
        if self.ph_u_b:
            do.ph_u_b = self.ph_u_b
        if self.ph_b_v:
            do.ph_b_v = self.ph_b_v
        if self.ph_v_r:
            do.ph_v_r = self.ph_v_r
        if self.ph_v_i:
            do.ph_v_i = self.ph_v_i
        if self.ellipticity:
            do.ellipticity = self.ellipticity
        if self.v_r:
            do.v_r = self.v_r
            do.v_r_err = self.v_r_err
        if self.sig_v:
            do.sig_v = self.sig_v
            do.sig_err = self.sig_err
        if self.sp_c:
            do.sp_c = self.sp_c
        if self.sp_r_c:
            do.sp_r_c = self.sp_r_c
        if self.sp_r_h:
            do.sp_r_h = self.sp_r_h
        if self.sp_mu_V:
            do.sp_mu_V = self.sp_mu_V
        if self.sp_rho_0:
            do.sp_rho_0 = self.sp_rho_0
# Registry of parsed clusters, keyed by catalogue id (gid).
cluster_list = {}

def read_float(s):
    """Parse a fixed-width catalogue field as a float.

    :param s: the raw field text (may be blank, malformed, or not a string)
    :returns: the parsed float, or None when the field is empty/invalid
    """
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  These three cover blank/garbled fields (ValueError)
    # and non-string input such as None (AttributeError/TypeError).
    try:
        return float(s.strip())
    except (AttributeError, TypeError, ValueError):
        return None
def read_str(s):
    """Strip a fixed-width catalogue field; return None when it is blank."""
    stripped = s.strip()
    return stripped or None
# Ruler for f1
#0        1         2         3         4         5         6         7         8         9        100
#01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
#   NGC 104    47 Tuc      00 24 05.67  -72 04 52.6   305.89  -44.89    4.5   7.4  1.9  -2.6  -3.1

# Parsing first file: identification, coordinates and distance.
# 'with' guarantees the file handle is closed even if a line fails to parse
# (the original open()/close() pair leaked the handle on error).
with open('import_data/f1.dat') as f1:
    for line in f1:
        c = Cluster()
        c.gid = line[:12].strip()
        c.name = read_str(line[12:25])
        ra_str = read_str(line[25:38])
        dec_str = read_str(line[38:50])
        c.gallon = read_float(line[50:58])
        c.gallat = read_float(line[58:66])
        c.dist_from_sun = read_float(line[66:73])
        #c.dist_from_gal_cen = read_float(line[73:79])
        #c.gal_dist_comp[0] = read_float(line[79:85])
        #c.gal_dist_comp[1] = read_float(line[85:91])
        #c.gal_dist_comp[2] = read_float(line[91:])
        # Convert sexagesimal RA/Dec strings to decimal degrees via astropy.
        coo = SkyCoord(ra=ra_str, dec=dec_str, unit=(u.hourangle, u.degree))
        c.ra = coo.ra.deg
        c.dec = coo.dec.deg
        cluster_list[c.gid] = c
# Ruler for f2
#0        1         2         3         4         5         6         7         8         9        100
#01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
#   NGC 104   -0.72 10  0.04 14.06 13.37  3.95  -9.42  0.37  0.88  0.53  1.14   G4    0.09

# Now parsing second file: metallicity and integrated photometry.
# Rows are keyed by gid, so every f2 row must already exist in cluster_list
# (a KeyError here means f1 and f2 are out of sync).
f2 = open('import_data/f2.dat')
for line in f2:
    gid = line[:12].strip()
    c = cluster_list[gid]
    c.metallicity = read_float(line[13:18])
    c.w_mean_met = read_float(line[18:21])
    #c.eb_v = read_float(line[21:28])
    #c.v_hb = read_float(line[28:34])
    #c.app_vd_mod = read_float(line[34:40])
    #c.v_t = read_float(line[40:46])
    c.m_v_t = read_float(line[46:53])
    c.ph_u_b = read_float(line[53:60])
    c.ph_b_v = read_float(line[60:66])
    c.ph_v_r = read_float(line[66:72])
    c.ph_v_i = read_float(line[72:78])
    #c.spt = read_str(line[78:82])
    c.ellipticity = read_float(line[82:])
f2.close()
# Ruler for f3
#0        1         2         3         4         5         6         7         8         9        100
#01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
#   NGC 104    -18.0   0.1    -26.7    11.0   0.3   2.07  0.36  3.17  14.38  4.88   7.84  9.55

# Third file: velocities and structural parameters.
with open('import_data/f3.dat') as f3:
    for line in f3:
        gid = line[:12].strip()
        c = cluster_list[gid]
        c.v_r = read_float(line[12:19])
        c.v_r_err = read_float(line[19:25])
        #c.c_LSR = read_float(line[25:33])
        c.sig_v = read_float(line[33:41])
        c.sig_err = read_float(line[41:47])
        # BUG FIX: these five values were previously stored on c.c, c.r_c,
        # c.r_h, c.mu_V and c.rho_0 -- attributes Cluster never declares and
        # Cluster.fill_in never reads -- so the structural parameters were
        # silently dropped.  Store them on the sp_* attributes instead.
        c.sp_c = read_float(line[47:54])
        c.sp_r_c = read_float(line[54:64])
        c.sp_r_h = read_float(line[64:70])
        c.sp_mu_V = read_float(line[70:78])
        c.sp_rho_0 = read_float(line[78:85])
        #c.lg_tc = read_float(line[85:92])
        #c.lg_th = read_float(line[92:])
from catalogue.models import *
def insert_in_django():
    """Replace the catalogue tables' contents with the parsed clusters.

    Deletes all existing GlobularCluster/Observation/Reference rows, creates
    one Reference for the Harris catalogue, then one GlobularCluster plus one
    Observation (filled from the parsed data) per entry in cluster_list.
    """
    # Emptying the tables
    for c in GlobularCluster.objects.all():
        c.delete()
    for o in Observation.objects.all():
        o.delete()
    for r in Reference.objects.all():
        r.delete()

    # Inserting a reference for the Harris Catalogue
    r = Reference(name='Harris catalogue', doi='10.1086/118116', ads='')
    r.save()

    print('Inserting into database :')
    # Now creating the observations for every cluster:
    for c in cluster_list.values():
        dc = GlobularCluster(cluster_id = c.gid)
        print(' . {}'.format(c.gid))
        dc.save()
        do = Observation()
        do.cluster_id = dc
        do.ref = r
        c.fill_in(do)
        do.save()
| |
import pytest
from osf_tests.factories import SubjectFactory
class ProviderMixinBase(object):
    """Base for provider API test mixins; concrete test classes must
    override provider_class with the provider factory under test."""
    @property
    def provider_class(self):
        raise NotImplementedError
@pytest.mark.django_db
class ProviderExistsMixin(ProviderMixinBase):
    """Shared tests for provider detail/list endpoints and their
    related taxonomies/licenses routes."""
    # Regression for https://openscience.atlassian.net/browse/OSF-7621

    # URL fixtures must be supplied by the concrete test class.
    @pytest.fixture()
    def fake_url(self):
        raise NotImplementedError

    @pytest.fixture()
    def provider_url(self):
        raise NotImplementedError

    @pytest.fixture()
    def provider_url_two(self):
        raise NotImplementedError

    @pytest.fixture()
    def provider_list_url(self):
        raise NotImplementedError

    @pytest.fixture()
    def provider_list_url_fake(self):
        raise NotImplementedError

    @pytest.fixture()
    def provider(self):
        return self.provider_class()

    @pytest.fixture()
    def provider_two(self):
        return self.provider_class()

    def test_provider_exists(self, app, provider_url, fake_url, provider_list_url, provider_list_url_fake):
        # Existing provider: detail, licenses, list and taxonomies all 200.
        detail_res = app.get(provider_url)
        assert detail_res.status_code == 200

        licenses_res = app.get('{}licenses/'.format(provider_url))
        assert licenses_res.status_code == 200

        res = app.get(provider_list_url)
        assert res.status_code == 200

        taxonomies_res = app.get('{}taxonomies/'.format(provider_url))
        assert taxonomies_res.status_code == 200

        # test_preprint_provider_does_not_exist_returns_404
        detail_res = app.get(fake_url, expect_errors=True)
        assert detail_res.status_code == 404

        licenses_res = app.get(
            '{}licenses/'.format(fake_url),
            expect_errors=True)
        assert licenses_res.status_code == 404

        res = app.get(
            provider_list_url_fake,
            expect_errors=True)
        assert res.status_code == 404

        taxonomies_res = app.get(
            '{}taxonomies/'.format(fake_url),
            expect_errors=True)
        assert taxonomies_res.status_code == 404

    def test_has_highlighted_subjects_flag(
            self, app, provider,
            provider_two, provider_url, provider_url_two):
        # Only the first provider gets a highlighted subject, so the two
        # providers' meta flags must differ.
        SubjectFactory(
            provider=provider,
            text='A', highlighted=True)
        SubjectFactory(provider=provider_two, text='B')

        res = app.get(provider_url)
        assert res.status_code == 200
        res_subjects = res.json['data']['relationships']['highlighted_taxonomies']
        assert res_subjects['links']['related']['meta']['has_highlighted_subjects'] is True

        res = app.get(provider_url_two)
        assert res.status_code == 200
        res_subjects = res.json['data']['relationships']['highlighted_taxonomies']
        assert res_subjects['links']['related']['meta']['has_highlighted_subjects'] is False
@pytest.mark.django_db
class ProviderSubjectsMixin(ProviderMixinBase):
    '''
    Exercises a provider's taxonomy endpoint against a fixed subject tree,
    both without rules (every subject visible) and with
    ``subjects_acceptable`` rules restricting the visible set.

    Subject Hierarchy
    +-----------------------------+
    |                             |
    |  +-------->B+----->F        |
    |  |                          |
    |  A+----------->C            |
    |  |                          |
    |  +-------->D+----->G        |
    |                             |
    |  H+------>I+----->J         |
    |           |                 |
    |           +----->K          |
    |                             |
    |  L+------>M+----->N         |
    |           |                 |
    |           +------->E        |
    |                             |
    |  O                          |
    +-----------------------------+
    '''
    @pytest.fixture(autouse=True)
    def subA(self):
        return SubjectFactory(text='A')

    @pytest.fixture(autouse=True)
    def subB(self, subA):
        return SubjectFactory(text='B', parent=subA)

    @pytest.fixture(autouse=True)
    def subC(self, subA):
        return SubjectFactory(text='C', parent=subA)

    @pytest.fixture(autouse=True)
    def subD(self, subA):
        return SubjectFactory(text='D', parent=subA)

    @pytest.fixture(autouse=True)
    def subF(self, subB):
        return SubjectFactory(text='F', parent=subB)

    @pytest.fixture(autouse=True)
    def subG(self, subD):
        return SubjectFactory(text='G', parent=subD)

    @pytest.fixture(autouse=True)
    def subH(self):
        return SubjectFactory(text='H')

    @pytest.fixture(autouse=True)
    def subI(self, subH):
        return SubjectFactory(text='I', parent=subH)

    @pytest.fixture(autouse=True)
    def subJ(self, subI):
        return SubjectFactory(text='J', parent=subI)

    @pytest.fixture(autouse=True)
    def subK(self, subI):
        return SubjectFactory(text='K', parent=subI)

    @pytest.fixture(autouse=True)
    def subL(self):
        return SubjectFactory(text='L')

    @pytest.fixture(autouse=True)
    def subM(self, subL):
        return SubjectFactory(text='M', parent=subL)

    @pytest.fixture(autouse=True)
    def subE(self, subM):
        return SubjectFactory(text='E', parent=subM)

    @pytest.fixture(autouse=True)
    def subN(self, subM):
        return SubjectFactory(text='N', parent=subM)

    @pytest.fixture(autouse=True)
    def subO(self):
        return SubjectFactory(text='O')

    # The rules below allow: A, B, D, G, H, I, J, L, M, N and E.
    # They do not allow: C, F, K, O.
    @pytest.fixture()
    def rules(self, subA, subB, subD, subH, subI, subJ, subL):
        return [
            ([subA._id, subB._id], False),
            ([subA._id, subD._id], True),
            ([subH._id, subI._id, subJ._id], True),
            ([subL._id], True)
        ]

    @pytest.fixture()
    def lawless_provider(self):
        # A provider with no subjects_acceptable rules.
        return self.provider_class()

    @pytest.fixture()
    def ruled_provider(self, rules):
        provider = self.provider_class()
        provider.subjects_acceptable = rules
        provider.save()
        return provider

    @pytest.fixture()
    def lawless_url(self):
        raise NotImplementedError

    @pytest.fixture()
    def ruled_url(self):
        raise NotImplementedError

    @pytest.fixture()
    def base_url(self):
        raise NotImplementedError

    def test_max_page_size(self, app, lawless_provider, base_url):
        # Default page size is 10; explicit sizes are honored up to the
        # hard cap of 1000.
        res = app.get(base_url)
        assert res.status_code == 200
        assert res.json['links']['meta']['per_page'] == 10

        res = app.get(base_url + '?page[size]=150')
        assert res.status_code == 200
        assert res.json['links']['meta']['per_page'] == 150

        res = app.get(base_url + '?page[size]=2018')
        assert res.status_code == 200
        assert res.json['links']['meta']['per_page'] == 1000

    def test_no_rules_grabs_all(self, app, lawless_url):
        res = app.get(lawless_url)
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 15

    def test_rules_only_grab_acceptable_subjects(self, app, ruled_url):
        res = app.get(ruled_url)
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 11

    def test_no_rules_with_null_parent_filter(self, app, lawless_url):
        # Roots only: A, H, L, O.
        res = app.get(lawless_url + 'filter[parents]=null')
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 4

    def test_rules_enforced_with_null_parent_filter(self, app, ruled_url):
        # Rules drop root O.
        res = app.get(ruled_url + 'filter[parents]=null')
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 3
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'A' in texts
        assert 'H' in texts
        assert 'L' in texts
        assert 'O' not in texts

    def test_no_rules_with_parents_filter(self, app, lawless_url, subB, subI, subM):
        res = app.get(
            lawless_url +
            'filter[parents]={}'.format(
                subB._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 1
        assert res.json['data'][0]['attributes']['text'] == 'F'

        res = app.get(
            lawless_url +
            'filter[parents]={}'.format(
                subI._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2

        res = app.get(
            lawless_url +
            'filter[parents]={}'.format(
                subM._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2

    def test_rules_enforced_with_parents_filter(self, app, ruled_url, subB, subI, subM):
        res = app.get(
            ruled_url +
            'filter[parents]={}'.format(
                subB._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 0
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'F' not in texts

        res = app.get(
            ruled_url +
            'filter[parents]={}'.format(
                subI._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 1
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'J' in texts
        assert 'K' not in texts

        res = app.get(
            ruled_url +
            'filter[parents]={}'.format(
                subM._id))
        # BUG FIX: this third response was previously never checked.
        # Assert it the same way as the parent-filter variant: M's
        # acceptable children are N and E.
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'N' in texts
        assert 'E' in texts

    def test_no_rules_with_parent_filter(self, app, lawless_url, subB, subI, subM):
        res = app.get(
            lawless_url +
            'filter[parent]={}'.format(
                subB._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 1
        assert res.json['data'][0]['attributes']['text'] == 'F'

        res = app.get(
            lawless_url +
            'filter[parent]={}'.format(
                subI._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2

        res = app.get(
            lawless_url +
            'filter[parent]={}'.format(
                subM._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2

    def test_rules_enforced_with_parent_filter(self, app, ruled_url, subB, subI, subM):
        res = app.get(
            ruled_url +
            'filter[parent]={}'.format(
                subB._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 0
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'F' not in texts

        res = app.get(
            ruled_url +
            'filter[parent]={}'.format(
                subI._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 1
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'J' in texts
        assert 'K' not in texts

        res = app.get(
            ruled_url +
            'filter[parent]={}'.format(
                subM._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'N' in texts
        assert 'E' in texts

    def test_no_rules_with_grandparent_filter(self, app, lawless_url, subA):
        res = app.get(
            lawless_url +
            'filter[parents]={}'.format(
                subA._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 3

    def test_rules_enforced_with_grandparent_filter(self, app, ruled_url, subA):
        res = app.get(
            ruled_url +
            'filter[parents]={}'.format(
                subA._id))
        assert res.status_code == 200
        assert res.json['links']['meta']['total'] == 2
        texts = [item['attributes']['text'] for item in res.json['data']]
        assert 'B' in texts
        assert 'D' in texts
        assert 'C' not in texts
@pytest.mark.django_db
class ProviderSpecificSubjectsMixin(ProviderMixinBase):
    """Checks that two providers' subject trees are completely isolated.

    Each provider gets a three-level chain (root -> parent -> child);
    neither provider's endpoint may expose or match the other's subjects.
    """

    @pytest.fixture(autouse=True)
    def provider_1(self):
        return self.provider_class()

    @pytest.fixture(autouse=True)
    def provider_2(self):
        return self.provider_class()

    @pytest.fixture(autouse=True)
    def root_subject_1(self, provider_1):
        return SubjectFactory(text='R1', provider=provider_1)

    @pytest.fixture(autouse=True)
    def parent_subject_1(self, provider_1, root_subject_1):
        return SubjectFactory(text='P1', provider=provider_1, parent=root_subject_1)

    @pytest.fixture(autouse=True)
    def child_subject_1(self, provider_1, parent_subject_1):
        return SubjectFactory(text='C1', provider=provider_1, parent=parent_subject_1)

    @pytest.fixture(autouse=True)
    def root_subject_2(self, provider_2):
        return SubjectFactory(text='R2', provider=provider_2)

    @pytest.fixture(autouse=True)
    def parent_subject_2(self, provider_2, root_subject_2):
        return SubjectFactory(text='P2', provider=provider_2, parent=root_subject_2)

    @pytest.fixture(autouse=True)
    def child_subject_2(self, provider_2, parent_subject_2):
        return SubjectFactory(text='C2', provider=provider_2, parent=parent_subject_2)

    @pytest.fixture()
    def url_1(self):
        raise NotImplementedError

    @pytest.fixture()
    def url_2(self):
        raise NotImplementedError

    def test_mapped_subjects_are_not_shared_list(self, app, url_1, url_2):
        # Each endpoint lists exactly its own 3 subjects, with no overlap.
        res_1 = app.get(url_1)
        res_2 = app.get(url_2)

        assert res_1.status_code == 200
        assert res_2.status_code == 200
        assert res_1.json['links']['meta']['total'] == 3
        assert res_2.json['links']['meta']['total'] == 3

        assert len(set([d['attributes']['text'] for d in res_1.json['data']]) &
                   set([d['attributes']['text'] for d in res_2.json['data']])) \
            == 0
        assert len(set([d['attributes']['text'] for d in res_1.json['data']]) |
                   set([d['attributes']['text'] for d in res_2.json['data']])) \
            == 6

    def test_mapped_subjects_are_not_shared_filter(self, app, url_1, url_2, root_subject_1, root_subject_2):
        # Filtering by each provider's own root returns only its child.
        res_1 = app.get(
            url_1 +
            'filter[parent]={}'.format(
                root_subject_1._id))
        res_2 = app.get(
            url_2 +
            'filter[parent]={}'.format(
                root_subject_2._id))

        assert res_1.status_code == 200
        assert res_2.status_code == 200
        assert res_1.json['links']['meta']['total'] == 1
        assert res_2.json['links']['meta']['total'] == 1

        assert len(set([d['attributes']['text'] for d in res_1.json['data']]) &
                   set([d['attributes']['text'] for d in res_2.json['data']])) \
            == 0
        assert len(set([d['attributes']['text'] for d in res_1.json['data']]) |
                   set([d['attributes']['text'] for d in res_2.json['data']])) \
            == 2

    def test_mapped_subjects_filter_wrong_provider(self, app, url_1, url_2, root_subject_1, root_subject_2):
        # Filter each endpoint by the *other* provider's root subject id:
        # nothing may match.
        # BUG FIX: previously the Subject objects themselves were
        # interpolated instead of their `_id`s, so the filter value was a
        # meaningless repr and the test was vacuous.
        res_1 = app.get(
            url_1 +
            'filter[parent]={}'.format(
                root_subject_2._id))
        res_2 = app.get(
            url_2 +
            'filter[parent]={}'.format(
                root_subject_1._id))

        assert res_1.status_code == 200
        assert res_2.status_code == 200
        assert res_1.json['links']['meta']['total'] == 0
        assert res_2.json['links']['meta']['total'] == 0
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions used by multiple converter files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.protobuf import config_pb2 as _config_pb2
from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training.saver import export_meta_graph as _export_meta_graph
# Map of tf.dtypes to TFLite types_flag_pb2.
_MAP_TF_TO_TFLITE_TYPES = {
    dtypes.float32: _types_pb2.FLOAT,
    dtypes.float16: _types_pb2.FLOAT16,
    dtypes.int32: _types_pb2.INT32,
    dtypes.int64: _types_pb2.INT64,
    dtypes.string: _types_pb2.STRING,
    dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
    dtypes.int8: _types_pb2.INT8,
    dtypes.complex64: _types_pb2.COMPLEX64
}
# Node attribute controlling whether Grappler lowers a While op into
# Switch/Merge form; cleared on While nodes by
# _remove_lower_using_switch_merge below to keep the functional form.
_LOWER_USING_SWITCH_MERGE = "_lower_using_switch_merge"
def convert_dtype_to_tflite_type(tf_dtype):
  """Converts tf.dtype to TFLite proto type.

  Args:
    tf_dtype: tf.dtype

  Raises:
    ValueError: Unsupported tf.dtype.

  Returns:
    types_flag_pb2.
  """
  tflite_type = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)
  if tflite_type is not None:
    return tflite_type
  raise ValueError("Unsupported tf.dtype {0}".format(tf_dtype))
def get_tensor_name(tensor):
  """Returns name of the input tensor.

  Args:
    tensor: tf.Tensor

  Returns:
    str
  """
  pieces = tensor.name.split(":")
  if len(pieces) > 2:
    raise ValueError("Tensor name invalid. Expect 0 or 1 colon, got {0}".format(
        len(pieces) - 1))
  # Match TF's naming scheme: the implicit ':0' output suffix is dropped,
  # but an explicit non-zero output index is preserved.
  if len(pieces) == 2 and pieces[1] != "0":
    return tensor.name
  return pieces[0]
def get_tensors_from_tensor_names(graph, tensor_names):
  """Gets the Tensors associated with the `tensor_names` in the provided graph.

  Args:
    graph: TensorFlow Graph.
    tensor_names: List of strings that represent names of tensors in the graph.

  Returns:
    A list of Tensor objects in the same order the names are provided.

  Raises:
    ValueError:
      tensor_names contains an invalid tensor name.
  """
  # Index every tensor in the graph by its canonical name.
  tensor_by_name = {}
  for op in graph.get_operations():
    for tensor in op.values():
      tensor_by_name[get_tensor_name(tensor)] = tensor

  # Resolve the requested names, preserving their order; collect any
  # misses so all bad names can be reported at once.
  tensors = []
  missing = []
  for name in tensor_names:
    try:
      tensors.append(tensor_by_name[name])
    except KeyError:
      missing.append(name)
  if missing:
    raise ValueError("Invalid tensors '{}' were found.".format(
        ",".join(missing)))
  return tensors
def set_tensor_shapes(tensors, shapes):
  """Sets Tensor shape for each tensor if the shape is defined.

  Args:
    tensors: TensorFlow ops.Tensor.
    shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).

  Raises:
    ValueError:
      `shapes` contains an invalid tensor.
      `shapes` contains an invalid shape for a valid tensor.
  """
  if shapes:
    # Index the given tensors by canonical name so user-supplied names
    # can be validated against them.
    tensor_names_to_tensor = {
        get_tensor_name(tensor): tensor for tensor in tensors
    }
    for name, shape in shapes.items():
      if name not in tensor_names_to_tensor:
        raise ValueError("Invalid tensor \'{}\' found in tensor shapes "
                         "map.".format(name))
      # A None shape means "leave this tensor's shape untouched".
      if shape is not None:
        tensor = tensor_names_to_tensor[name]
        try:
          tensor.set_shape(shape)
        except ValueError as error:
          # Re-raise with the tensor name and both shapes for context.
          message = ("The shape of tensor '{0}' cannot be changed from {1} to "
                     "{2}. {3}".format(name, tensor.shape, shape, str(error)))
          raise ValueError(message)
def get_grappler_config(optimizers_list):
  """Creates a tf.compat.v1.ConfigProto for configuring Grappler.

  Args:
    optimizers_list: List of strings that represents the list of optimizers.

  Returns:
    tf.ConfigProto.
  """
  config = _config_pb2.ConfigProto()
  # Register every requested optimizer on the rewrite options in one go.
  config.graph_options.rewrite_options.optimizers.extend(optimizers_list)
  return config
def run_graph_optimizations(graph_def,
                            input_arrays,
                            output_arrays,
                            config,
                            graph=None):
  """Apply standard TensorFlow optimizations to the graph_def.

  Args:
    graph_def: Frozen GraphDef to be optimized.
    input_arrays: List of arrays that are considered inputs of the graph.
    output_arrays: List of arrays that are considered outputs of the graph.
    config: tf.ConfigProto.
    graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)

  Returns:
    A new, optimized GraphDef.
  """
  meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)
  # We need to add a collection called 'train_op' so that grappler
  # knows what the outputs are.
  # NOTE(review): inputs are listed as fetches too — presumably to keep
  # Grappler from pruning them; confirm before relying on it.
  fetch_collection = _meta_graph_pb2.CollectionDef()
  for array in input_arrays + output_arrays:
    fetch_collection.node_list.value.append(array.name)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
  return tf_optimizer.OptimizeGraph(config, meta_graph)
def _remove_lower_using_switch_merge(graph_def):
  """Remove '_lower_using_switch_merge' attributes from the given graph.

  Args:
    graph_def: GraphDef to be optimized.

  Returns:
    A new GraphDef that with no '_lower_using_switch_merge' attribute.
  """
  stripped = _graph_pb2.GraphDef()
  stripped.library.CopyFrom(graph_def.library)
  stripped.versions.CopyFrom(graph_def.versions)
  for node in graph_def.node:
    clone = copy.deepcopy(node)
    # Only While ops carry the lowering attribute; force it off so the
    # functional form is kept.
    if clone.op == "While":
      clone.attr[_LOWER_USING_SWITCH_MERGE].b = False
    stripped.node.extend([clone])
  return stripped
def _convert_op_hints_if_present(sess, graph_def, output_tensors,
                                 hinted_outputs_nodes):
  """Converts OpHints to stub ops; requires an unfrozen graph.

  Freezes variables (also keeping the hinted output nodes alive), replaces
  hinted subgraphs with stubs, then strips training-only nodes.
  """
  if is_frozen_graph(sess):
    raise ValueError("Try to convert op hints, needs unfrozen graph.")
  output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
  # Include the hinted output nodes in the keep-list so freezing does not
  # drop them before stub conversion.
  graph_def = tf_graph_util.convert_variables_to_constants(
      sess, graph_def, output_arrays + hinted_outputs_nodes)
  graph_def = convert_op_hints_to_stubs(graph_def=graph_def)
  graph_def = tf_graph_util.remove_training_nodes(graph_def)
  return graph_def
def freeze_graph(sess, input_tensors, output_tensors):
  """Returns a frozen GraphDef.

  Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the
  existing GraphDef is returned. The Grappler pass is only run on models that
  are frozen in order to inline the functions in the graph.
  If OpHints is present, it will try to convert the OpHint graph.

  Args:
    sess: TensorFlow Session.
    input_tensors: List of input tensors.
    output_tensors: List of output tensors (only .name is used from this).

  Returns:
    Frozen GraphDef.
  """
  # Runs a Grappler pass in order to inline any functions in the graph.
  # Asides from inlining any simple function, Grappler will also try to lower
  # while loop into switch merge representation which is undesired for Ophints,
  # so we simply remove those attributes to prevent Grappler from doing so.
  graph_def = _remove_lower_using_switch_merge(sess.graph_def)
  config = get_grappler_config(["function"])
  graph_def = run_graph_optimizations(
      graph_def, input_tensors, output_tensors, config, graph=sess.graph)
  # If ophints are present, just convert them.
  hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
  if hinted_outputs_nodes:
    return _convert_op_hints_if_present(sess, graph_def, output_tensors,
                                        hinted_outputs_nodes)
  if not is_frozen_graph(sess):
    # Unfrozen graph: fold variables into constants so the result is
    # self-contained.
    output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
    return tf_graph_util.convert_variables_to_constants(sess, graph_def,
                                                        output_arrays)
  else:
    # Already frozen: return the session's graph as-is (the optimized
    # graph_def above is only needed for the unfrozen/hinted paths).
    return sess.graph_def
def is_frozen_graph(sess):
  """Determines if the graph is frozen.

  Determines if a graph has previously been frozen by checking for any
  operations of type Variable*. If variables are found, the graph is not frozen.

  Args:
    sess: TensorFlow Session.

  Returns:
    Bool.
  """
  return not any(
      op.type.startswith("Variable") or op.type.endswith("VariableOp")
      for op in sess.graph.get_operations())
| |
###############################################################################
# Synchronization primitives based on our SemLock implementation
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/synchronize.py (17/02/2017)
# * Remove ctx argument for compatibility reason
# * Implementation of Condition/Event are necessary for compatibility
# with python2.7/3.3, Barrier should be reimplemented to for those
# version (but it is not used in loky).
#
import os
import sys
import tempfile
import threading
import _multiprocessing
from time import time as _time
from .context import assert_spawning
from . import semaphore_tracker
from multiprocessing import process
from multiprocessing import util
# Public API of this module.
__all__ = [
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
    if sys.version_info < (3, 4):
        # Python < 3.4: use loky's bundled SemLock implementation.
        from .semlock import SemLock as _SemLock
        from .semlock import sem_unlink
    else:
        # Python >= 3.4: the stdlib's C-level SemLock supports names.
        from _multiprocessing import SemLock as _SemLock
        from _multiprocessing import sem_unlink
except (ImportError):
    raise ImportError("This platform lacks a functioning sem_open" +
                      " implementation, therefore, the required" +
                      " synchronization primitives needed will not" +
                      " function, see issue 3770.")
if sys.version_info[:2] < (3, 3):
    # Python 2 has no FileExistsError; OSError is the closest superclass.
    FileExistsError = OSError
#
# Constants
#
# Kind codes passed to the SemLock constructor.
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
    """Base class for semaphores and mutexes.

    Wraps a named `_multiprocessing.SemLock` and handles naming, pickling
    for spawn, after-fork re-registration, and cleanup of the semaphore
    name at shutdown.
    """
    # Process-local generator of random name suffixes for semaphores.
    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue):
        # unlink_now is only used on win32 or when we are using fork.
        unlink_now = False
        # Randomly generated names can collide with existing semaphores;
        # retry a bounded number of times before giving up.
        for i in range(100):
            try:
                self._semlock = _SemLock(
                    kind, value, maxvalue, SemLock._make_name(),
                    unlink_now)
            except FileExistsError:  # pragma: no cover
                pass
            else:
                break
        else:  # pragma: no cover
            raise FileExistsError('cannot find name for semaphore')
        util.debug('created semlock with handle %s and name "%s"'
                   % (self._semlock.handle, self._semlock.name))
        self._make_methods()

        # Plain function (no self) so registration does not create a
        # bound-method reference cycle back to this object.
        def _after_fork(obj):
            obj._semlock._after_fork()
        util.register_after_fork(self, _after_fork)
        # When the object is garbage collected or the
        # process shuts down we unlink the semaphore name
        semaphore_tracker.register(self._semlock.name)
        util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                      exitpriority=0)

    @staticmethod
    def _cleanup(name):
        # Unlink the named semaphore and tell the tracker it is gone.
        sem_unlink(name)
        semaphore_tracker.unregister(name)

    def _make_methods(self):
        # Expose the C-level primitives directly as bound attributes.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.acquire()

    def __exit__(self, *args):
        return self._semlock.release()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        sl = self._semlock
        h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)

    def __setstate__(self, state):
        # Rebuild the C-level object from (handle, kind, maxvalue, name).
        self._semlock = _SemLock._rebuild(*state)
        util.debug('recreated blocker with handle %r and name "%s"'
                   % (state[0], state[3]))
        self._make_methods()

    @staticmethod
    def _make_name():
        # OSX does not support long names for semaphores
        return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
#
# Semaphore
#
class Semaphore(SemLock):
    """Counting semaphore with an effectively unbounded maximum."""

    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)

    def get_value(self):
        # sem_getvalue is not implemented on OSX.
        if sys.platform == 'darwin':
            raise NotImplementedError("OSX does not implement sem_getvalue")
        return self._semlock._get_value()

    def __repr__(self):
        # Best-effort: the underlying value may be unavailable (e.g. OSX).
        try:
            current = self._semlock._get_value()
        except Exception:
            current = 'unknown'
        return '<%s(value=%s)>' % (self.__class__.__name__, current)
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
    """Semaphore whose maximum value equals its initial value."""

    def __init__(self, value=1):
        # maxvalue == value makes the underlying SemLock bounded.
        SemLock.__init__(self, SEMAPHORE, value, value)

    def __repr__(self):
        # Best-effort: the underlying value may be unavailable (e.g. OSX).
        try:
            current = self._semlock._get_value()
        except Exception:
            current = 'unknown'
        return '<%s(value=%s, maxvalue=%s)>' % (
            self.__class__.__name__, current, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
    """Non-recursive lock built on a binary semaphore."""

    def __init__(self):
        super(Lock, self).__init__(SEMAPHORE, 1, 1)

    def __repr__(self):
        # Best-effort debugging repr: '<Lock(owner=...)>'.
        try:
            if self._semlock._is_mine():
                # Held by us: report process (and non-main thread) name.
                owner = process.current_process().name
                thread_name = threading.current_thread().name
                if thread_name != 'MainThread':
                    owner += '|' + thread_name
            elif self._semlock._get_value() == 1:
                owner = 'None'
            elif self._semlock._count() > 0:
                owner = 'SomeOtherThread'
            else:
                owner = 'SomeOtherProcess'
        except Exception:
            owner = 'unknown'
        return '<%s(owner=%s)>' % (self.__class__.__name__, owner)
#
# Recursive lock
#
class RLock(SemLock):
    """Recursive lock: the owner may acquire it repeatedly."""

    def __init__(self):
        super(RLock, self).__init__(RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        # Best-effort debugging repr: '<RLock(owner, recursion-count)>'.
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                # Value 1 on a mutex means unlocked.
                name, count = 'None', 0
            elif self._semlock._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
#
# Condition variable
#
class Condition(object):
    """Condition variable built from one lock and three semaphores.

    _sleeping_count tracks threads blocked in wait(); _woken_count tracks
    sleepers that have woken (or timed out) but are not yet accounted for;
    _wait_semaphore is what sleepers actually block on.
    """

    def __init__(self, lock=None):
        self._lock = lock or RLock()
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        # Delegate acquire/release straight to the underlying lock.
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            # Sleepers minus already-woken gives the current waiter count.
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__,
                                 self._lock, num_waiters)

    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
            'must acquire() condition before using wait()'
        # indicate that this thread is going to sleep
        self._sleeping_count.release()
        # release lock
        # (fully, even if recursively held: remember the recursion count)
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()
        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()
            # reacquire lock
            # (restoring the original recursion count)
            for i in range(count):
                self._lock.acquire()

    def notify(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)
        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res
        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()  # wake up one sleeper
            self._woken_count.acquire()  # wait for the sleeper to wake
            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)
        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res
        # Wake every remaining sleeper, then wait for each to confirm.
        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()  # wake up one sleeper
            sleepers += 1
        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()  # wait for a sleeper to wake
            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        # Repeatedly wait until predicate() is truthy or the deadline
        # passes; returns the last predicate() result either way.
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - _time()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
#
# Event
#
class Event(object):
    """Event built from a Condition and a 0/1 Semaphore as the flag.

    Flag value 1 means "set", 0 means "clear"; a non-blocking acquire
    probes the flag, and a matching release restores it.
    """

    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def is_set(self):
        with self._cond:
            # Probe the flag without blocking; restore it if it was set.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

    def set(self):
        with self._cond:
            # acquire-then-release makes the flag exactly 1 whether or
            # not it was already set, then wake all waiters.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            # Drop the flag to 0 (no-op if already clear).
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Re-check after waking: returns True iff the flag is set.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
| |
from pyuntl import DC_ORDER
# XML Schema instance namespace URI.
XSI = 'http://www.w3.org/2001/XMLSchema-instance'
# Namespaces for the DC XML.
DC_NAMESPACES = {
    'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'xsi': XSI,
}
# Maps a DC element tag to the controlled-vocabulary name used to resolve
# its content. A value is either a plain string (one vocabulary for the
# whole element) or a dict keyed by qualifier; the key 'None' stands in
# for an absent qualifier (see DCElement.determine_vocab).
VOCAB_INDEX = {
    'coverage': {
        'timePeriod': 'coverage-eras',
    },
    'format': {
        'None': 'formats',
    },
    'language': 'languages',
    'type': 'resource-types',
    'rights': {
        'access': 'rights-access',
        'license': 'rights-licenses',
    },
}
class DC_StructureException(Exception):
    """Base exception for the DC Python structure."""

    def __init__(self, value):
        # Keep the offending value for formatting in __str__.
        self.value = value

    def __str__(self):
        return str(self.value)
class DCElement(object):
    """A class for containing DC elements.

    Subclasses set ``self.tag`` (and possibly ``content`` /
    ``contained_children``) before calling this ``__init__``; the
    ``getattr`` defaults below only fill in what a subclass did not set.
    """
    def __init__(self, **kwargs):
        """Set all the defaults if inheriting class hasn't defined them."""
        content = kwargs.get('content', None)
        vocab_data = kwargs.get('vocab_data', None)
        # Set the element's content (a subclass-set value wins over the
        # 'content' keyword argument).
        self.content = getattr(self, 'content', content)
        # Get list of allowed child elements.
        self.contained_children = getattr(self, 'contained_children', [])
        # Get list of child elements.
        self.children = getattr(self, 'children', [])
        # Get the qualifier.
        qualifier = kwargs.get('qualifier', None)
        # Determine the vocab from the qualifier.
        self.content_vocab = self.determine_vocab(qualifier)
        # If needed, resolve the value by accessing the vocabularies.
        if kwargs.get('resolve_values', False) and self.content_vocab and \
                vocab_data:
            self.content = self.resolver(vocab_data, 'label')
        elif kwargs.get('resolve_urls', False) and self.content_vocab and \
                vocab_data:
            self.content = self.resolver(vocab_data, 'url')

    def add_child(self, child):
        """This adds a child object to the current one. It will check
        the contained_children list to make sure that the object is
        allowable, and throw an exception if not.
        """
        # Make sure the child exists before adding it.
        if child:
            # Append child if it is allowed to exist under the parent.
            if child.tag in self.contained_children:
                self.children.append(child)
            else:
                raise DC_StructureException(
                    'Invalid child "%s" for parent "%s"' %
                    (child.tag, self.tag)
                )

    def get_child_content(self, children, element_name):
        """Get the requested element content from a list of children."""
        # Loop through the children and get the specified element.
        for child in children:
            # If the child is the requested element, return its content.
            if child.tag == element_name:
                return child.content
        return ''

    def determine_vocab(self, qualifier):
        """Determine the vocab from the qualifier.

        Returns the vocabulary name for this element's tag from
        VOCAB_INDEX, or None if the tag/qualifier has no vocabulary.
        """
        vocab_value = VOCAB_INDEX.get(self.tag, None)
        if isinstance(vocab_value, dict):
            # Per-qualifier vocabularies; 'None' keys an absent qualifier.
            if qualifier is None:
                qualifier = 'None'
            # Find the value based on the qualifier.
            return vocab_value.get(qualifier, None)
        elif vocab_value is not None:
            return vocab_value
        else:
            return None

    def resolver(self, vocab_data, attribute):
        """Pull the requested attribute based on the given vocabulary
        and content.

        Returns the matching term's attribute, or the unchanged content
        when no vocabulary term matches.
        """
        term_list = vocab_data.get(self.content_vocab, [])
        # Loop through the terms from the vocabulary.
        for term_dict in term_list:
            # Match the name to the current content.
            if term_dict['name'] == self.content:
                return term_dict[attribute]
        return self.content
class DC(DCElement):
    """Root Dublin Core element; may contain any element in DC_ORDER."""
    def __init__(self, **kwargs):
        # The root element accepts every defined DC child tag.
        self.contained_children = DC_ORDER
        self.tag = 'dc'
        super(DC, self).__init__(**kwargs)
class DCCoverage(DCElement):
    """Dublin Core 'coverage' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'coverage'
        super(DCCoverage, self).__init__(**kwargs)
class DCCreator(DCElement):
    """Dublin Core 'creator' element.

    The content is taken from the 'name' child, when one is supplied.
    """
    def __init__(self, **kwargs):
        self.tag = 'creator'
        # Extract the creator's name from any provided children before
        # the base initializer consumes the kwargs.
        child_elements = kwargs.get('children', [])
        self.content = self.get_child_content(child_elements, 'name')
        super(DCCreator, self).__init__(**kwargs)
class DCDate(DCElement):
    """Dublin Core 'date' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'date'
        super(DCDate, self).__init__(**kwargs)
class DCDescription(DCElement):
    """Dublin Core 'description' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'description'
        super(DCDescription, self).__init__(**kwargs)
class DCFormat(DCElement):
    """Dublin Core 'format' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'format'
        super(DCFormat, self).__init__(**kwargs)
class DCLanguage(DCElement):
    """Dublin Core 'language' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'language'
        super(DCLanguage, self).__init__(**kwargs)
class DCPublisher(DCElement):
    """Dublin Core 'publisher' element.

    The content is taken from the 'name' child, when one is supplied.
    """
    def __init__(self, **kwargs):
        self.tag = 'publisher'
        # Extract the publisher's name from any provided children
        # before the base initializer consumes the kwargs.
        child_elements = kwargs.get('children', [])
        self.content = self.get_child_content(child_elements, 'name')
        super(DCPublisher, self).__init__(**kwargs)
class DCSubject(DCElement):
    """Dublin Core 'subject' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'subject'
        super(DCSubject, self).__init__(**kwargs)
class DCTitle(DCElement):
    """Dublin Core 'title' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'title'
        super(DCTitle, self).__init__(**kwargs)
class DCType(DCElement):
    """Dublin Core 'type' element (dispatched from 'resourceType')."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'type'
        super(DCType, self).__init__(**kwargs)
class DCIdentifier(DCElement):
    """Dublin Core 'identifier' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'identifier'
        super(DCIdentifier, self).__init__(**kwargs)
class DCContributor(DCElement):
    """Dublin Core 'contributor' element.

    The content is taken from the 'name' child, when one is supplied.
    """
    def __init__(self, **kwargs):
        self.tag = 'contributor'
        # Extract the contributor's name from any provided children
        # before the base initializer consumes the kwargs.
        child_elements = kwargs.get('children', [])
        self.content = self.get_child_content(child_elements, 'name')
        super(DCContributor, self).__init__(**kwargs)
class DCSource(DCElement):
    """Dublin Core 'source' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'source'
        super(DCSource, self).__init__(**kwargs)
class DCRelation(DCElement):
    """Dublin Core 'relation' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'relation'
        super(DCRelation, self).__init__(**kwargs)
class DCRights(DCElement):
    """Dublin Core 'rights' element."""
    def __init__(self, **kwargs):
        # Tag must be set before DCElement.__init__ runs.
        self.tag = 'rights'
        super(DCRights, self).__init__(**kwargs)
def description_director(**kwargs):
    """Pick and build the element class for a description.

    A 'physical' qualifier is represented as a DCFormat element; any
    other qualifier (or none) produces a plain DCDescription.
    """
    qualifier = kwargs.get('qualifier')
    # Only the 'physical' qualifier is special-cased.
    element_class = DCFormat if qualifier == 'physical' else DCDescription
    return element_class(
        qualifier=qualifier,
        content=kwargs.get('content'),
    )
def date_director(**kwargs):
    """Build a DCDate only for creation dates.

    Any other date qualifier is intentionally dropped (returns None).
    """
    # Guard clause: non-creation dates are not converted.
    if kwargs.get('qualifier') != 'creation':
        return None
    return DCDate(content=kwargs.get('content'))
def identifier_director(**kwargs):
    """Direct how to handle the identifier element.

    Builds the identifier content from the ARK and/or domain name when
    the qualifier requests it; otherwise prefixes the raw content with
    the lower-cased qualifier. Always returns a DCIdentifier.
    """
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    # Set default scheme if it is None or is not supplied.
    scheme = kwargs.get('scheme') or 'http'
    qualifier = kwargs.get('qualifier', None)
    content = kwargs.get('content', '')
    # NOTE: the branches must be mutually exclusive. The previous
    # code used a separate if/else after the 'ark' branch, so an
    # 'ark' qualifier fell through into the generic branch and
    # produced a doubled "ark: ark: ..." prefix.
    if ark and qualifier == 'ark':
        content = 'ark: %s' % ark
    elif domain_name and ark and qualifier == 'permalink':
        # Create the permalink URL.
        if not domain_name.endswith('/'):
            domain_name += '/'
        permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
        # Make sure it has a trailing slash.
        if not permalink_url.endswith('/'):
            permalink_url += '/'
        content = permalink_url
    elif qualifier:
        # Generic case: label the content with its qualifier.
        content = '%s: %s' % (qualifier.lower(), content)
    return DCIdentifier(content=content)
# Maps source element names to the class or director function that
# converts them into DC element objects.  Director functions inspect
# the qualifier/content to choose a class, and may return None (e.g.
# date_director skips non-creation dates).
DC_CONVERSION_DISPATCH = {
    'dc': DC,
    'coverage': DCCoverage,
    'creator': DCCreator,
    'date': date_director,
    'description': description_director,
    'format': DCFormat,
    'language': DCLanguage,
    'publisher': DCPublisher,
    'subject': DCSubject,
    'title': DCTitle,
    # Note the source name differs from the DC tag ('type') here.
    'resourceType': DCType,
    'identifier': identifier_director,
    'contributor': DCContributor,
    'source': DCSource,
    'relation': DCRelation,
    'rights': DCRights,
    }
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'RandomHouseHoldSelection.selected_households'
db.alter_column(u'survey_randomhouseholdselection', 'selected_households', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'RandomHouseHoldSelection.selected_households'
db.alter_column(u'survey_randomhouseholdselection', 'selected_households', self.gf('django.db.models.fields.CharField')(max_length=510))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_rule'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rule'", 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_max_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_min_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_rule'", 'null': 'True', 'to': "orm['survey.QuestionOption']"}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'unique_together': "(('survey', 'name'),)", 'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'non_response': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.batchquestionorder': {
'Meta': {'object_name': 'BatchQuestionOrder'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_question_order'", 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_batch_order'", 'to': "orm['survey.Question']"})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'count': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_count'", 'null': 'True', 'to': "orm['survey.Question']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_denominator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'denominator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'denominator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"}),
'groups': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'formula'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'numerator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'numerator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"})
},
'survey.groupcondition': {
'Meta': {'unique_together': "(('value', 'attribute', 'condition'),)", 'object_name': 'GroupCondition'},
'attribute': ('django.db.models.fields.CharField', [], {'default': "'AGE'", 'max_length': '20'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'EQUALS'", 'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'conditions'", 'symmetrical': 'False', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'random_sample_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_household'", 'null': 'True', 'to': "orm['survey.Survey']"}),
'uid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_completed_households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead', '_ormbases': ['survey.HouseholdMember']},
u'householdmember_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['survey.HouseholdMember']", 'unique': 'True', 'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'})
},
'survey.householdmember': {
'Meta': {'object_name': 'HouseholdMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'household_member'", 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'survey.householdmemberbatchcompletion': {
'Meta': {'object_name': 'HouseholdMemberBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_member_batches'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdmembergroup': {
'Meta': {'object_name': 'HouseholdMemberGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True', 'max_length': '5'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.CharField', [], {'default': "'Percentage'", 'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator'", 'to': "orm['survey.QuestionModule']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.locationcode': {
'Meta': {'object_name': 'LocationCode'},
'code': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'code'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.locationtypedetails': {
'Meta': {'object_name': 'LocationTypeDetails'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'has_code': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'location_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'to': u"orm['locations.LocationType']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.locationweight': {
'Meta': {'object_name': 'LocationWeight'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weight'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'selection_probability': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_weight'", 'to': "orm['survey.Survey']"})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Question']"}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Question']"}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batches': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'module_question'", 'null': 'True', 'to': "orm['survey.QuestionModule']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionmodule': {
'Meta': {'object_name': 'QuestionModule'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.questionoption': {
'Meta': {'ordering': "['order']", 'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.TextField', [], {}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'random_household'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'has_sampling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'sample_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10', 'max_length': '2'}),
'type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Question']"}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.unknowndobattribute': {
'Meta': {'object_name': 'UnknownDOBAttribute'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unknown_dob_attribute'", 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'survey.uploaderrorlog': {
'Meta': {'object_name': 'UploadErrorLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['survey']
| |
# Map of Android permission name -> {fully-qualified class name -> API members
# that require that permission}. Each member entry is a triple
#   [kind, member, declaration]
# where kind is "C" for a constant/field and "F" for a method, `member` is the
# field name or method signature, and `declaration` is the Java modifier/type
# string. Java array types previously appeared mangled as the literal text
# "collections.deque()" (an over-eager automated rewrite of "[]"); the proper
# "[]" / "[][]" array syntax is restored below.
PERMISSIONS = {
    "BIND_DEVICE_ADMIN": {
        "android.app.admin.DeviceAdminReceiver": [
            ["C", "ACTION_DEVICE_ADMIN_ENABLED", "public static final String"],
        ],
    },
    "FACTORY_TEST": {
        "android.content.pm.ApplicationInfo": [
            ["C", "FLAG_FACTORY_TEST", "public static final int"],
            ["C", "flags", "public int"],
        ],
        "android.content.Intent": [
            ["C", "IntentResolution", "public static final String"],
            ["C", "ACTION_FACTORY_TEST", "public static final String"],
        ],
    },
    "BIND_INPUT_METHOD": {
        "android.view.inputmethod.InputMethod": [
            ["C", "SERVICE_INTERFACE", "public static final String"],
        ],
    },
    "AUTHENTICATE_ACCOUNTS": {
        "android.accounts.AccountManager": [
            ["F",
             "addAccountExplicitly(android.accounts.Account, java.lang.String, android.os.Bundle)",
             "public boolean"],
            ["F", "getPassword(android.accounts.Account)", "public String"],
            ["F", "getUserData(android.accounts.Account, java.lang.String)",
             "public String"],
            ["F", "peekAuthToken(android.accounts.Account, java.lang.String)",
             "public String"],
            ["F",
             "setAuthToken(android.accounts.Account, java.lang.String, java.lang.String)",
             "public void"],
            ["F", "setPassword(android.accounts.Account, java.lang.String)",
             "public void"],
            ["F",
             "setUserData(android.accounts.Account, java.lang.String, java.lang.String)",
             "public void"],
        ],
    },
    "INTERNET": {
        "android.drm.DrmErrorEvent": [
            ["C", "TYPE_NO_INTERNET_CONNECTION", "public static final int"],
        ],
    },
    "RECORD_AUDIO": {
        "android.net.sip.SipAudioCall": [
            ["F", "startAudio()", "public void"],
        ],
    },
    "ACCESS_MOCK_LOCATION": {
        "android.location.LocationManager": [
            ["F",
             "addTestProvider(java.lang.String, boolean, boolean, boolean, boolean, boolean, boolean, boolean, int, int)",
             "public void"],
            ["F", "clearTestProviderEnabled(java.lang.String)", "public void"],
            ["F", "clearTestProviderLocation(java.lang.String)", "public void"],
            ["F", "clearTestProviderStatus(java.lang.String)", "public void"],
            ["F", "removeTestProvider(java.lang.String)", "public void"],
            ["F", "setTestProviderEnabled(java.lang.String, boolean)",
             "public void"],
            ["F",
             "setTestProviderLocation(java.lang.String, android.location.Location)",
             "public void"],
            ["F",
             "setTestProviderStatus(java.lang.String, int, android.os.Bundle, long)",
             "public void"],
        ],
    },
    "VIBRATE": {
        "android.provider.Settings.System": [
            ["C", "VIBRATE_ON", "public static final String"],
        ],
        "android.app.Notification": [
            ["C", "DEFAULT_VIBRATE", "public static final int"],
            ["C", "defaults", "public int"],
        ],
        "android.app.Notification.Builder": [
            ["F", "setDefaults(int)", "public Notification.Builder"],
        ],
        "android.media.AudioManager": [
            ["C", "EXTRA_RINGER_MODE", "public static final String"],
            ["C", "EXTRA_VIBRATE_SETTING", "public static final String"],
            ["C", "EXTRA_VIBRATE_TYPE", "public static final String"],
            ["C", "FLAG_REMOVE_SOUND_AND_VIBRATE", "public static final int"],
            ["C", "FLAG_VIBRATE", "public static final int"],
            ["C", "RINGER_MODE_VIBRATE", "public static final int"],
            ["C", "VIBRATE_SETTING_CHANGED_ACTION", "public static final String"],
            ["C", "VIBRATE_SETTING_OFF", "public static final int"],
            ["C", "VIBRATE_SETTING_ON", "public static final int"],
            ["C", "VIBRATE_SETTING_ONLY_SILENT", "public static final int"],
            ["C", "VIBRATE_TYPE_NOTIFICATION", "public static final int"],
            ["C", "VIBRATE_TYPE_RINGER", "public static final int"],
            ["F", "getRingerMode()", "public int"],
            ["F", "getVibrateSetting(int)", "public int"],
            ["F", "setRingerMode(int)", "public void"],
            ["F", "setVibrateSetting(int, int)", "public void"],
            ["F", "shouldVibrate(int)", "public boolean"],
        ],
    },
    "GLOBAL_SEARCH": {
        "android.app.SearchManager": [
            ["C", "EXTRA_SELECT_QUERY", "public static final String"],
            ["C", "INTENT_ACTION_GLOBAL_SEARCH", "public static final String"],
        ],
    },
    "BROADCAST_STICKY": {
        "android.content.Context": [
            ["F", "removeStickyBroadcast(android.content.Intent)",
             "public abstract void"],
            ["F", "sendStickyBroadcast(android.content.Intent)",
             "public abstract void"],
        ],
        "android.content.ContextWrapper": [
            ["F", "removeStickyBroadcast(android.content.Intent)", "public void"],
            ["F", "sendStickyBroadcast(android.content.Intent)", "public void"],
        ],
    },
    "KILL_BACKGROUND_PROCESSES": {
        "android.app.ActivityManager": [
            ["F", "killBackgroundProcesses(java.lang.String)", "public void"],
        ],
    },
    "SET_TIME_ZONE": {
        "android.app.AlarmManager": [
            ["F", "setTimeZone(java.lang.String)", "public void"],
        ],
    },
    "BLUETOOTH_ADMIN": {
        "android.bluetooth.BluetoothAdapter": [
            ["F", "cancelDiscovery()", "public boolean"],
            ["F", "disable()", "public boolean"],
            ["F", "enable()", "public boolean"],
            ["F", "setName(java.lang.String)", "public boolean"],
            ["F", "startDiscovery()", "public boolean"],
        ],
    },
    "CAMERA": {
        "android.hardware.Camera.ErrorCallback": [
            ["F", "onError(int, android.hardware.Camera)",
             "public abstract void"],
        ],
        "android.view.KeyEvent": [
            ["C", "KEYCODE_CAMERA", "public static final int"],
        ],
        "android.bluetooth.BluetoothClass.Device": [
            ["C", "AUDIO_VIDEO_VIDEO_CAMERA", "public static final int"],
        ],
        "android.provider.MediaStore": [
            ["C", "INTENT_ACTION_STILL_IMAGE_CAMERA",
             "public static final String"],
            ["C", "INTENT_ACTION_VIDEO_CAMERA", "public static final String"],
        ],
        "android.hardware.Camera.CameraInfo": [
            ["C", "CAMERA_FACING_BACK", "public static final int"],
            ["C", "CAMERA_FACING_FRONT", "public static final int"],
            ["C", "facing", "public int"],
        ],
        "android.provider.ContactsContract.StatusColumns": [
            ["C", "CAPABILITY_HAS_CAMERA", "public static final int"],
        ],
        "android.hardware.Camera.Parameters": [
            ["F", "setRotation(int)", "public void"],
        ],
        "android.media.MediaRecorder.VideoSource": [
            ["C", "CAMERA", "public static final int"],
        ],
        "android.content.Intent": [
            ["C", "IntentResolution", "public static final String"],
            ["C", "ACTION_CAMERA_BUTTON", "public static final String"],
        ],
        "android.content.pm.PackageManager": [
            ["C", "FEATURE_CAMERA", "public static final String"],
            ["C", "FEATURE_CAMERA_AUTOFOCUS", "public static final String"],
            ["C", "FEATURE_CAMERA_FLASH", "public static final String"],
            ["C", "FEATURE_CAMERA_FRONT", "public static final String"],
        ],
        "android.hardware.Camera": [
            ["C", "CAMERA_ERROR_SERVER_DIED", "public static final int"],
            ["C", "CAMERA_ERROR_UNKNOWN", "public static final int"],
            ["F", "setDisplayOrientation(int)", "public final void"],
        ],
    },
    "SET_WALLPAPER": {
        "android.content.Intent": [
            ["C", "IntentResolution", "public static final String"],
            ["C", "ACTION_SET_WALLPAPER", "public static final String"],
        ],
        "android.app.WallpaperManager": [
            ["C", "WALLPAPER_PREVIEW_META_DATA", "public static final String"],
        ],
    },
    "WAKE_LOCK": {
        "android.net.sip.SipAudioCall": [
            ["F", "startAudio()", "public void"],
        ],
        "android.media.MediaPlayer": [
            ["F", "setWakeMode(android.content.Context, int)", "public void"],
        ],
        "android.os.PowerManager": [
            ["C", "ACQUIRE_CAUSES_WAKEUP", "public static final int"],
            ["C", "FULL_WAKE_LOCK", "public static final int"],
            ["C", "ON_AFTER_RELEASE", "public static final int"],
            ["C", "PARTIAL_WAKE_LOCK", "public static final int"],
            ["C", "SCREEN_BRIGHT_WAKE_LOCK", "public static final int"],
            ["C", "SCREEN_DIM_WAKE_LOCK", "public static final int"],
            ["F", "newWakeLock(int, java.lang.String)",
             "public PowerManager.WakeLock"],
        ],
    },
    "MANAGE_ACCOUNTS": {
        "android.accounts.AccountManager": [
            ["F",
             "addAccount(java.lang.String, java.lang.String, java.lang.String[], android.os.Bundle, android.app.Activity, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F", "clearPassword(android.accounts.Account)", "public void"],
            ["F",
             "confirmCredentials(android.accounts.Account, android.os.Bundle, android.app.Activity, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F",
             "editProperties(java.lang.String, android.app.Activity, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F",
             "getAuthTokenByFeatures(java.lang.String, java.lang.String, java.lang.String[], android.app.Activity, android.os.Bundle, android.os.Bundle, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F", "invalidateAuthToken(java.lang.String, java.lang.String)",
             "public void"],
            ["F",
             "removeAccount(android.accounts.Account, android.accounts.AccountManagerCallback<java.lang.Boolean>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F",
             "updateCredentials(android.accounts.Account, java.lang.String, android.os.Bundle, android.app.Activity, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
        ],
    },
    "NFC": {
        "android.inputmethodservice.InputMethodService": [
            ["C", "SoftInputView", "public static final int"],
            ["C", "CandidatesView", "public static final int"],
            ["C", "FullscreenMode", "public static final int"],
            ["C", "GeneratingText", "public static final int"],
        ],
        "android.nfc.tech.NfcA": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "get(android.nfc.Tag)", "public static NfcA"],
            ["F", "transceive(byte[])", "public byte[]"],
        ],
        "android.nfc.tech.NfcB": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "get(android.nfc.Tag)", "public static NfcB"],
            ["F", "transceive(byte[])", "public byte[]"],
        ],
        "android.nfc.NfcAdapter": [
            ["C", "ACTION_TECH_DISCOVERED", "public static final String"],
            ["F", "disableForegroundDispatch(android.app.Activity)",
             "public void"],
            ["F", "disableForegroundNdefPush(android.app.Activity)",
             "public void"],
            ["F",
             "enableForegroundDispatch(android.app.Activity, android.app.PendingIntent, android.content.IntentFilter[], java.lang.String[][])",
             "public void"],
            ["F",
             "enableForegroundNdefPush(android.app.Activity, android.nfc.NdefMessage)",
             "public void"],
            ["F", "getDefaultAdapter()", "public static NfcAdapter"],
            ["F", "getDefaultAdapter(android.content.Context)",
             "public static NfcAdapter"],
            ["F", "isEnabled()", "public boolean"],
        ],
        "android.nfc.tech.NfcF": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "get(android.nfc.Tag)", "public static NfcF"],
            ["F", "transceive(byte[])", "public byte[]"],
        ],
        "android.nfc.tech.NdefFormatable": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "format(android.nfc.NdefMessage)", "public void"],
            ["F", "formatReadOnly(android.nfc.NdefMessage)", "public void"],
        ],
        "android.app.Activity": [
            ["C", "Fragments", "public static final int"],
            ["C", "ActivityLifecycle", "public static final int"],
            ["C", "ConfigurationChanges", "public static final int"],
            ["C", "StartingActivities", "public static final int"],
            ["C", "SavingPersistentState", "public static final int"],
            ["C", "Permissions", "public static final int"],
            ["C", "ProcessLifecycle", "public static final int"],
        ],
        "android.nfc.tech.MifareClassic": [
            ["C", "KEY_NFC_FORUM", "public static final byte[]"],
            ["F", "authenticateSectorWithKeyA(int, byte[])", "public boolean"],
            ["F", "authenticateSectorWithKeyB(int, byte[])", "public boolean"],
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "decrement(int, int)", "public void"],
            ["F", "increment(int, int)", "public void"],
            ["F", "readBlock(int)", "public byte[]"],
            ["F", "restore(int)", "public void"],
            ["F", "transceive(byte[])", "public byte[]"],
            ["F", "transfer(int)", "public void"],
            ["F", "writeBlock(int, byte[])", "public void"],
        ],
        "android.nfc.Tag": [
            ["F", "getTechList()", "public String[]"],
        ],
        "android.app.Service": [
            ["C", "WhatIsAService", "public static final int"],
            ["C", "ServiceLifecycle", "public static final int"],
            ["C", "Permissions", "public static final int"],
            ["C", "ProcessLifecycle", "public static final int"],
            ["C", "LocalServiceSample", "public static final int"],
            ["C", "RemoteMessengerServiceSample", "public static final int"],
        ],
        "android.nfc.NfcManager": [
            ["F", "getDefaultAdapter()", "public NfcAdapter"],
        ],
        "android.nfc.tech.MifareUltralight": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "readPages(int)", "public byte[]"],
            ["F", "transceive(byte[])", "public byte[]"],
            ["F", "writePage(int, byte[])", "public void"],
        ],
        "android.nfc.tech.NfcV": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "get(android.nfc.Tag)", "public static NfcV"],
            ["F", "transceive(byte[])", "public byte[]"],
        ],
        "android.nfc.tech.TagTechnology": [
            ["F", "close()", "public abstract void"],
            ["F", "connect()", "public abstract void"],
        ],
        "android.preference.PreferenceActivity": [
            ["C", "SampleCode", "public static final String"],
        ],
        "android.content.pm.PackageManager": [
            ["C", "FEATURE_NFC", "public static final String"],
        ],
        "android.content.Context": [
            ["C", "NFC_SERVICE", "public static final String"],
        ],
        "android.nfc.tech.Ndef": [
            ["C", "NFC_FORUM_TYPE_1", "public static final String"],
            ["C", "NFC_FORUM_TYPE_2", "public static final String"],
            ["C", "NFC_FORUM_TYPE_3", "public static final String"],
            ["C", "NFC_FORUM_TYPE_4", "public static final String"],
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "getType()", "public String"],
            ["F", "isWritable()", "public boolean"],
            ["F", "makeReadOnly()", "public boolean"],
            ["F", "writeNdefMessage(android.nfc.NdefMessage)", "public void"],
        ],
        "android.nfc.tech.IsoDep": [
            ["F", "close()", "public void"],
            ["F", "connect()", "public void"],
            ["F", "setTimeout(int)", "public void"],
            ["F", "transceive(byte[])", "public byte[]"],
        ],
    },
    "ACCESS_FINE_LOCATION": {
        "android.telephony.TelephonyManager": [
            ["F", "getCellLocation()", "public CellLocation"],
        ],
        "android.location.LocationManager": [
            ["C", "GPS_PROVIDER", "public static final String"],
            ["C", "NETWORK_PROVIDER", "public static final String"],
            ["C", "PASSIVE_PROVIDER", "public static final String"],
            ["F", "addGpsStatusListener(android.location.GpsStatus.Listener)",
             "public boolean"],
            ["F", "addNmeaListener(android.location.GpsStatus.NmeaListener)",
             "public boolean"],
        ],
    },
    "REORDER_TASKS": {
        "android.app.ActivityManager": [
            ["F", "moveTaskToFront(int, int)", "public void"],
        ],
    },
    "MODIFY_AUDIO_SETTINGS": {
        "android.net.sip.SipAudioCall": [
            ["F", "setSpeakerMode(boolean)", "public void"],
        ],
        "android.media.AudioManager": [
            ["F", "startBluetoothSco()", "public void"],
            ["F", "stopBluetoothSco()", "public void"],
        ],
    },
    "READ_PHONE_STATE": {
        "android.telephony.TelephonyManager": [
            ["C", "ACTION_PHONE_STATE_CHANGED", "public static final String"],
            ["F", "getDeviceId()", "public String"],
            ["F", "getDeviceSoftwareVersion()", "public String"],
            ["F", "getLine1Number()", "public String"],
            ["F", "getSimSerialNumber()", "public String"],
            ["F", "getSubscriberId()", "public String"],
            ["F", "getVoiceMailAlphaTag()", "public String"],
            ["F", "getVoiceMailNumber()", "public String"],
        ],
        "android.telephony.PhoneStateListener": [
            ["C", "LISTEN_CALL_FORWARDING_INDICATOR", "public static final int"],
            ["C", "LISTEN_CALL_STATE", "public static final int"],
            ["C", "LISTEN_DATA_ACTIVITY", "public static final int"],
            ["C", "LISTEN_MESSAGE_WAITING_INDICATOR", "public static final int"],
            ["C", "LISTEN_SIGNAL_STRENGTH", "public static final int"],
        ],
        "android.os.Build.VERSION_CODES": [
            ["C", "DONUT", "public static final int"],
        ],
    },
    "BIND_WALLPAPER": {
        "android.service.wallpaper.WallpaperService": [
            ["C", "SERVICE_INTERFACE", "public static final String"],
        ],
    },
    "DUMP": {
        "android.os.Debug": [
            ["F",
             "dumpService(java.lang.String, java.io.FileDescriptor, java.lang.String[])",
             "public static boolean"],
        ],
        "android.os.IBinder": [
            ["C", "DUMP_TRANSACTION", "public static final int"],
        ],
    },
    "USE_CREDENTIALS": {
        "android.accounts.AccountManager": [
            ["F",
             "blockingGetAuthToken(android.accounts.Account, java.lang.String, boolean)",
             "public String"],
            ["F",
             "getAuthToken(android.accounts.Account, java.lang.String, android.os.Bundle, android.app.Activity, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F",
             "getAuthToken(android.accounts.Account, java.lang.String, boolean, android.accounts.AccountManagerCallback<android.os.Bundle>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F", "invalidateAuthToken(java.lang.String, java.lang.String)",
             "public void"],
        ],
    },
    "ACCESS_COARSE_LOCATION": {
        "android.telephony.TelephonyManager": [
            ["F", "getCellLocation()", "public CellLocation"],
        ],
        "android.telephony.PhoneStateListener": [
            ["C", "LISTEN_CELL_LOCATION", "public static final int"],
        ],
        "android.location.LocationManager": [
            ["C", "NETWORK_PROVIDER", "public static final String"],
        ],
    },
    "RECEIVE_BOOT_COMPLETED": {
        "android.content.Intent": [
            ["C", "ACTION_BOOT_COMPLETED", "public static final String"],
        ],
    },
    "SET_ALARM": {
        "android.provider.AlarmClock": [
            ["C", "ACTION_SET_ALARM", "public static final String"],
            ["C", "EXTRA_HOUR", "public static final String"],
            ["C", "EXTRA_MESSAGE", "public static final String"],
            ["C", "EXTRA_MINUTES", "public static final String"],
            ["C", "EXTRA_SKIP_UI", "public static final String"],
        ],
    },
    "PROCESS_OUTGOING_CALLS": {
        "android.content.Intent": [
            ["C", "ACTION_NEW_OUTGOING_CALL", "public static final String"],
        ],
    },
    "GET_TASKS": {
        "android.app.ActivityManager": [
            ["F", "getRecentTasks(int, int)", "public List"],
            ["F", "getRunningTasks(int)", "public List"],
        ],
    },
    "SET_TIME": {
        "android.app.AlarmManager": [
            ["F", "setTime(long)", "public void"],
            ["F", "setTimeZone(java.lang.String)", "public void"],
        ],
    },
    "ACCESS_WIFI_STATE": {
        "android.net.sip.SipAudioCall": [
            ["F", "startAudio()", "public void"],
        ],
    },
    "READ_HISTORY_BOOKMARKS": {
        "android.provider.Browser": [
            ["C", "BOOKMARKS_URI", "public static final Uri"],
            ["C", "SEARCHES_URI", "public static final Uri"],
            ["F",
             "addSearchUrl(android.content.ContentResolver, java.lang.String)",
             "public static final void"],
            ["F", "canClearHistory(android.content.ContentResolver)",
             "public static final boolean"],
            ["F", "getAllBookmarks(android.content.ContentResolver)",
             "public static final Cursor"],
            ["F", "getAllVisitedUrls(android.content.ContentResolver)",
             "public static final Cursor"],
            ["F",
             "requestAllIcons(android.content.ContentResolver, java.lang.String, android.webkit.WebIconDatabase.IconListener)",
             "public static final void"],
            ["F", "truncateHistory(android.content.ContentResolver)",
             "public static final void"],
            ["F",
             "updateVisitedHistory(android.content.ContentResolver, java.lang.String, boolean)",
             "public static final void"],
        ],
    },
    "STATUS_BAR": {
        "android.view.View.OnSystemUiVisibilityChangeListener": [
            ["F", "onSystemUiVisibilityChange(int)", "public abstract void"],
        ],
        "android.view.View": [
            ["C", "STATUS_BAR_HIDDEN", "public static final int"],
            ["C", "STATUS_BAR_VISIBLE", "public static final int"],
        ],
        "android.view.WindowManager.LayoutParams": [
            ["C", "TYPE_STATUS_BAR", "public static final int"],
            ["C", "TYPE_STATUS_BAR_PANEL", "public static final int"],
            ["C", "systemUiVisibility", "public int"],
            ["C", "type", "public int"],
        ],
    },
    "READ_LOGS": {
        "android.os.DropBoxManager": [
            ["C", "ACTION_DROPBOX_ENTRY_ADDED", "public static final String"],
            ["F", "getNextEntry(java.lang.String, long)",
             "public DropBoxManager.Entry"],
        ],
    },
    "BLUETOOTH": {
        "android.os.Process": [
            ["C", "BLUETOOTH_GID", "public static final int"],
        ],
        "android.content.pm.PackageManager": [
            ["C", "FEATURE_BLUETOOTH", "public static final String"],
        ],
        "android.media.AudioManager": [
            ["C", "ROUTE_BLUETOOTH", "public static final int"],
            ["C", "ROUTE_BLUETOOTH_A2DP", "public static final int"],
            ["C", "ROUTE_BLUETOOTH_SCO", "public static final int"],
        ],
        "android.provider.Settings.System": [
            ["C", "AIRPLANE_MODE_RADIOS", "public static final String"],
            ["C", "BLUETOOTH_DISCOVERABILITY", "public static final String"],
            ["C", "BLUETOOTH_DISCOVERABILITY_TIMEOUT",
             "public static final String"],
            ["C", "BLUETOOTH_ON", "public static final String"],
            ["C", "RADIO_BLUETOOTH", "public static final String"],
            ["C", "VOLUME_BLUETOOTH_SCO", "public static final String"],
        ],
        "android.provider.Settings": [
            ["C", "ACTION_BLUETOOTH_SETTINGS", "public static final String"],
        ],
        "android.bluetooth.BluetoothAdapter": [
            ["C", "ACTION_CONNECTION_STATE_CHANGED",
             "public static final String"],
            ["C", "ACTION_DISCOVERY_FINISHED", "public static final String"],
            ["C", "ACTION_DISCOVERY_STARTED", "public static final String"],
            ["C", "ACTION_LOCAL_NAME_CHANGED", "public static final String"],
            ["C", "ACTION_REQUEST_DISCOVERABLE", "public static final String"],
            ["C", "ACTION_REQUEST_ENABLE", "public static final String"],
            ["C", "ACTION_SCAN_MODE_CHANGED", "public static final String"],
            ["C", "ACTION_STATE_CHANGED", "public static final String"],
            ["F", "cancelDiscovery()", "public boolean"],
            ["F", "disable()", "public boolean"],
            ["F", "enable()", "public boolean"],
            ["F", "getAddress()", "public String"],
            ["F", "getBondedDevices()", "public Set"],
            ["F", "getName()", "public String"],
            ["F", "getScanMode()", "public int"],
            ["F", "getState()", "public int"],
            ["F", "isDiscovering()", "public boolean"],
            ["F", "isEnabled()", "public boolean"],
            ["F",
             "listenUsingInsecureRfcommWithServiceRecord(java.lang.String, java.util.UUID)",
             "public BluetoothServerSocket"],
            ["F",
             "listenUsingRfcommWithServiceRecord(java.lang.String, java.util.UUID)",
             "public BluetoothServerSocket"],
            ["F", "setName(java.lang.String)", "public boolean"],
            ["F", "startDiscovery()", "public boolean"],
        ],
        "android.bluetooth.BluetoothProfile": [
            ["F", "getConnectedDevices()", "public abstract List"],
            ["F", "getConnectionState(android.bluetooth.BluetoothDevice)",
             "public abstract int"],
            ["F", "getDevicesMatchingConnectionStates(int[])",
             "public abstract List"],
        ],
        "android.bluetooth.BluetoothHeadset": [
            ["C", "ACTION_AUDIO_STATE_CHANGED", "public static final String"],
            ["C", "ACTION_CONNECTION_STATE_CHANGED",
             "public static final String"],
            ["C", "ACTION_VENDOR_SPECIFIC_HEADSET_EVENT",
             "public static final String"],
            ["F", "getConnectedDevices()", "public List"],
            ["F", "getConnectionState(android.bluetooth.BluetoothDevice)",
             "public int"],
            ["F", "getDevicesMatchingConnectionStates(int[])", "public List"],
            ["F", "isAudioConnected(android.bluetooth.BluetoothDevice)",
             "public boolean"],
            ["F", "startVoiceRecognition(android.bluetooth.BluetoothDevice)",
             "public boolean"],
            ["F", "stopVoiceRecognition(android.bluetooth.BluetoothDevice)",
             "public boolean"],
        ],
        "android.bluetooth.BluetoothDevice": [
            ["C", "ACTION_ACL_CONNECTED", "public static final String"],
            ["C", "ACTION_ACL_DISCONNECTED", "public static final String"],
            ["C", "ACTION_ACL_DISCONNECT_REQUESTED",
             "public static final String"],
            ["C", "ACTION_BOND_STATE_CHANGED", "public static final String"],
            ["C", "ACTION_CLASS_CHANGED", "public static final String"],
            ["C", "ACTION_FOUND", "public static final String"],
            ["C", "ACTION_NAME_CHANGED", "public static final String"],
            ["F", "createInsecureRfcommSocketToServiceRecord(java.util.UUID)",
             "public BluetoothSocket"],
            ["F", "createRfcommSocketToServiceRecord(java.util.UUID)",
             "public BluetoothSocket"],
            ["F", "getBluetoothClass()", "public BluetoothClass"],
            ["F", "getBondState()", "public int"],
            ["F", "getName()", "public String"],
        ],
        "android.provider.Settings.Secure": [
            ["C", "BLUETOOTH_ON", "public static final String"],
        ],
        "android.bluetooth.BluetoothA2dp": [
            ["C", "ACTION_CONNECTION_STATE_CHANGED",
             "public static final String"],
            ["C", "ACTION_PLAYING_STATE_CHANGED", "public static final String"],
            ["F", "getConnectedDevices()", "public List"],
            ["F", "getConnectionState(android.bluetooth.BluetoothDevice)",
             "public int"],
            ["F", "getDevicesMatchingConnectionStates(int[])", "public List"],
            ["F", "isA2dpPlaying(android.bluetooth.BluetoothDevice)",
             "public boolean"],
        ],
        "android.bluetooth.BluetoothAssignedNumbers": [
            ["C", "BLUETOOTH_SIG", "public static final int"],
        ],
    },
    "WRITE_HISTORY_BOOKMARKS": {
        "android.provider.Browser": [
            ["C", "BOOKMARKS_URI", "public static final Uri"],
            ["C", "SEARCHES_URI", "public static final Uri"],
            ["F",
             "addSearchUrl(android.content.ContentResolver, java.lang.String)",
             "public static final void"],
            ["F", "clearHistory(android.content.ContentResolver)",
             "public static final void"],
            ["F", "clearSearches(android.content.ContentResolver)",
             "public static final void"],
            ["F",
             "deleteFromHistory(android.content.ContentResolver, java.lang.String)",
             "public static final void"],
            ["F",
             "deleteHistoryTimeFrame(android.content.ContentResolver, long, long)",
             "public static final void"],
            ["F", "truncateHistory(android.content.ContentResolver)",
             "public static final void"],
            ["F",
             "updateVisitedHistory(android.content.ContentResolver, java.lang.String, boolean)",
             "public static final void"],
        ],
    },
    "ACCOUNT_MANAGER": {
        "android.accounts.AccountManager": [
            ["C", "KEY_ACCOUNT_MANAGER_RESPONSE", "public static final String"],
        ],
    },
    "GET_ACCOUNTS": {
        "android.accounts.AccountManager": [
            ["F", "getAccounts()", "public Account[]"],
            ["F", "getAccountsByType(java.lang.String)", "public Account[]"],
            ["F",
             "getAccountsByTypeAndFeatures(java.lang.String, java.lang.String[], android.accounts.AccountManagerCallback<android.accounts.Account[]>, android.os.Handler)",
             "public AccountManagerFuture"],
            ["F",
             "hasFeatures(android.accounts.Account, java.lang.String[], android.accounts.AccountManagerCallback<java.lang.Boolean>, android.os.Handler)",
             "public AccountManagerFuture"],
        ],
    },
    "WRITE_EXTERNAL_STORAGE": {
        "android.os.Build.VERSION_CODES": [
            ["C", "DONUT", "public static final int"],
        ],
        "android.app.DownloadManager.Request": [
            ["F", "setDestinationUri(android.net.Uri)",
             "public DownloadManager.Request"],
        ],
    },
    "REBOOT": {
        "android.os.RecoverySystem": [
            ["F", "installPackage(android.content.Context, java.io.File)",
             "public static void"],
            ["F", "rebootWipeUserData(android.content.Context)",
             "public static void"],
        ],
        "android.content.Intent": [
            ["C", "IntentResolution", "public static final String"],
            ["C", "ACTION_REBOOT", "public static final String"],
        ],
        "android.os.PowerManager": [
            ["F", "reboot(java.lang.String)", "public void"],
        ],
    },
}
| |
"""
Matrix/Synapse custom authentication provider backend.
This allows a Matrix/Synapse installation to use a custom backend (not part of
this API) to authenticate users against epcon database.
The main (and currently the only) endpoint is
/api/v1/isauth
For more information about developing a custom auth backend for matrix/synapse
please refer to https://github.com/matrix-org/synapse/blob/master/docs/\
password_auth_providers.md
"""
from enum import Enum
import json
from functools import wraps
from hashlib import md5
from django.conf.urls import url as re_path
from django.contrib.auth.hashers import check_password as django_check_password
from django.contrib.auth.hashers import is_password_usable
from django.db.models import Q
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from conference.models import (
AttendeeProfile,
Conference,
Speaker,
TalkSpeaker,
Ticket,
)
from pycon.settings import MATRIX_AUTH_API_DEBUG as DEBUG
from pycon.settings import MATRIX_AUTH_API_ALLOWED_IPS as ALLOWED_IPS
from pycon.settings import SECRET_KEY
# Error Codes
# Error codes returned in the JSON `error` field of failed API responses.
# The functional Enum API numbers members 1..6 in declaration order, which
# matches the original explicit numbering exactly.
ApiError = Enum(
    'ApiError',
    [
        'WRONG_METHOD',     # 1
        'AUTH_ERROR',       # 2
        'INPUT_ERROR',      # 3
        'UNAUTHORIZED',     # 4
        'WRONG_SCHEME',     # 5
        'BAD_REQUEST',      # 6
    ],
)
def _error(error: ApiError, msg: str) -> JsonResponse:
    """Build the standard JSON error payload for `error` carrying `msg`."""
    body = {
        'error': error.value,
        'message': f'{error.name}: {msg}',
    }
    return JsonResponse(body)
def get_client_ip(request) -> str:
    """
    Return the client IP.

    This is a best-effort way of fetching the client IP which does not protect
    against spoofing and which tries to understand some proxying.
    This should NOT be relied upon for serious stuff.
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # X-Forwarded-For is "client, proxy1, proxy2"; take the first
        # (original client) entry.  Strip the surrounding whitespace that
        # proxies commonly add after the comma, otherwise comparisons
        # against an allow-list of bare IPs fail.
        ip = x_forwarded_for.split(',')[0].strip()
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip
# Checkers
def request_checker(checker, error_msg):
    """
    Generic sanity-check decorator factory for views.

    `checker`: callable taking a request and returning True when it is valid
    `error_msg`: message reported back when the request is rejected

    Invalid requests receive a BAD_REQUEST JSON error instead of reaching
    the wrapped view.
    """
    def decorator(view):
        @wraps(view)
        def guarded(request, *args, **kwargs):
            if checker(request):
                return view(request, *args, **kwargs)
            return _error(ApiError.BAD_REQUEST, error_msg)
        return guarded
    return decorator
# Ensure that the view is called via an HTTPS request and return a JSON error
# payload if not. If DEBUG = True, it has no effect.
ensure_https_in_ops = request_checker(
    lambda r: DEBUG or r.is_secure(), 'please use HTTPS'
)
# We use this instead of the built-in decorator (require_POST) to return a
# JSON error payload instead of a simple 405.
ensure_post = request_checker(lambda r: r.method == 'POST', 'please use POST')
# Reject requests that do not declare a JSON body.
ensure_json_content_type = request_checker(
    lambda r: r.content_type == 'application/json', 'please send JSON'
)
def restrict_client_ip_to_allowed_list(fn):
    """
    Reject requests whose client IP is not in ALLOWED_IPS.

    An empty/falsy ALLOWED_IPS disables the check entirely.
    """
    @wraps(fn)
    def guarded(request, *args, **kwargs):
        # Best-effort client IP detection only; this does NOT defend
        # against IP spoofing or similar attacks.
        client_ip = get_client_ip(request)
        allowed = not ALLOWED_IPS or client_ip in ALLOWED_IPS
        if allowed:
            return fn(request, *args, **kwargs)
        return _error(ApiError.UNAUTHORIZED, 'you are not authorized here')
    return guarded
def check_user_password(user, password):
    """
    Validate `password` for `user`.

    Users with a usable (hashed) password are checked against it; users
    without one (e.g. social-auth accounts) are checked against the
    generated Matrix password instead.
    """
    if is_password_usable(user.password):
        return django_check_password(password, user.password)
    return password == generate_matrix_password(user)
def get_assigned_tickets(user, conference):
    """Return `user`'s assigned, non-cancelled, paid tickets for `conference`."""
    # Keyword arguments in a single filter() call are ANDed together within
    # one filter, exactly like the original Q(...) & Q(...) conjunction.
    return Ticket.objects.filter(
        fare__conference=conference.code,
        frozen=False,                       # i.e. the ticket was not cancelled
        orderitem__order___complete=True,   # i.e. they paid
        user=user,                          # i.e. assigned to user
    )
def is_speaker(user, conference):
    """
    Return True when `user` has at least one accepted talk in `conference`.

    A speaker is a user with at least one accepted talk in the current
    conference.
    """
    try:
        speaker = user.speaker
    except Speaker.DoesNotExist:
        return False
    # .exists() lets the database answer with a cheap EXISTS query instead
    # of counting every matching row as `.count() > 0` did.
    return TalkSpeaker.objects.filter(
        speaker=speaker,
        talk__conference=conference.code,
        talk__status='accepted'
    ).exists()
def generate_matrix_password(user):
    """
    Create a temporary password for `user` so that they can log into our
    Matrix chat server using their email address and that password. This is
    only needed for social auth users since they do not have a valid password
    in our database.

    The generated password is not stored anywhere.
    """
    def to_base(n, base, digits='0123456789abcdefghijklmnopqrstuvwxyz'):
        """Return `n` rendered in base `base` (iterative form)."""
        if n == 0:
            return digits[0]
        out = []
        while n:
            n, rem = divmod(n, base)
            out.append(digits[rem])
        return ''.join(reversed(out))

    digest = md5(str(user.email + SECRET_KEY).encode()).hexdigest()
    return to_base(int(digest, 16), 36)
@csrf_exempt
@ensure_post
@ensure_https_in_ops
@ensure_json_content_type
@restrict_client_ip_to_allowed_list
def isauth(request):
    """
    Return whether or not the given email and password (sent via POST) are
    valid. If they are indeed valid, return the number and type of tickets
    assigned to the user, together with some other user metadata (see below).

    Input via POST:
    {
        "email": str,
        "password": str (not encrypted)
    }
    or
    {
        "username": str,
        "password": str (not encrypted)
    }

    Output (JSON)
    {
        "username": str,
        "first_name": str,
        "last_name": str,
        "email": str,
        "is_staff": bool,
        "is_speaker": bool,
        "is_active": bool,
        "is_minor": bool,
        "tickets": [{"fare_name": str, "fare_code": str}*]
    }

    Tickets, if any, are returned only for the currently active conference and
    only if ASSIGNED to the user identified by `email`.

    In case of any error (including but not limited to if either email or
    password are incorrect/unknown), return
    {
        "message": str,
        "error": int
    }
    """
    # Parse and sanity-check the JSON payload before touching the database.
    try:
        data = json.loads(request.body)
    except json.decoder.JSONDecodeError as ex:
        return _error(ApiError.INPUT_ERROR, ex.msg)
    if not isinstance(data, dict):
        return _error(ApiError.INPUT_ERROR,
                      'please provide credentials in JSON format')
    if 'password' not in data:
        return _error(ApiError.INPUT_ERROR,
                      'please provide user password in JSON payload')
    if 'username' not in data and 'email' not in data:
        return _error(ApiError.INPUT_ERROR,
                      'please provide username or email in JSON payload')
    # First, let's find the user/account profile given the email/username as
    # appropriate.  Email takes precedence when both are sent.
    if 'email' in data:
        try:
            profile = AttendeeProfile.objects.get(user__email=data['email'])
        except AttendeeProfile.DoesNotExist:
            return _error(ApiError.AUTH_ERROR, 'unknown user')
    elif 'username' in data:
        # Here we could have an issue: the username could be coming from Matrix
        # and could have been sanitized, according to some Matrix specific
        # rules. The most common one is that the epcon username is a number,
        # which Matrix does not like. What we do in Matrix is prepending a "g"
        # to it (because it comes from folks with Google auth mostly). Try
        # that: strip the leading "g" to recover the original numeric name.
        if data['username'].startswith('g') and data['username'][1:].isdigit():
            data['username'] = data['username'][1:]
        try:
            profile = AttendeeProfile.objects.get(
                user__username=data['username']
            )
        except AttendeeProfile.DoesNotExist:
            return _error(ApiError.AUTH_ERROR, 'unknown user')
    else:
        # Unreachable given the check above; kept as a defensive fallback.
        return _error(ApiError.INPUT_ERROR, 'no email/username provided')
    # Is the password OK?
    if not check_user_password(profile.user, data['password']):
        return _error(ApiError.AUTH_ERROR, 'authentication error')
    conference = Conference.objects.current()
    payload = {
        "username": profile.user.username,
        "first_name": profile.user.first_name,
        "last_name": profile.user.last_name,
        "email": profile.user.email,
        "is_staff": profile.user.is_staff,
        "is_speaker": is_speaker(profile.user, conference),
        "is_active": profile.user.is_active,
        "is_minor": profile.is_minor,
        "tickets": [
            {"fare_name": t.fare.name, "fare_code": t.fare.code}
            for t in get_assigned_tickets(profile.user, conference)
        ]
    }
    # Just a little nice to have thing when debugging: we can send in the POST
    # payload, all the fields that we want to override in the answer and they
    # will just be passed through regardless of what is in the DB. We just
    # remove the password to be on the safe side.
    if DEBUG:
        data.pop('password')
        payload.update(data)
    return JsonResponse(payload)
# URL routing for this app; the only endpoint is /v1/isauth/ (mounted under
# /api/ by the project urlconf per the module docstring).
urlpatterns = [
    re_path(r"^v1/isauth/$", isauth, name="isauth"),
]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
from .proxy_resource import ProxyResource
from .backup_long_term_retention_policy import BackupLongTermRetentionPolicy
from .backup_long_term_retention_vault import BackupLongTermRetentionVault
from .tracked_resource import TrackedResource
from .restore_point import RestorePoint
from .recoverable_database import RecoverableDatabase
from .restorable_dropped_database import RestorableDroppedDatabase
from .max_size_capability import MaxSizeCapability
from .service_objective_capability import ServiceObjectiveCapability
from .edition_capability import EditionCapability
from .elastic_pool_per_database_min_dtu_capability import ElasticPoolPerDatabaseMinDtuCapability
from .elastic_pool_per_database_max_dtu_capability import ElasticPoolPerDatabaseMaxDtuCapability
from .elastic_pool_dtu_capability import ElasticPoolDtuCapability
from .elastic_pool_edition_capability import ElasticPoolEditionCapability
from .server_version_capability import ServerVersionCapability
from .location_capabilities import LocationCapabilities
from .server_connection_policy import ServerConnectionPolicy
from .database_security_alert_policy import DatabaseSecurityAlertPolicy
from .data_masking_policy import DataMaskingPolicy
from .data_masking_rule import DataMaskingRule
from .firewall_rule import FirewallRule
from .geo_backup_policy import GeoBackupPolicy
from .import_extension_request import ImportExtensionRequest
from .import_export_response import ImportExportResponse
from .import_request import ImportRequest
from .export_request import ExportRequest
from .metric_value import MetricValue
from .metric_name import MetricName
from .metric import Metric
from .metric_availability import MetricAvailability
from .metric_definition import MetricDefinition
from .replication_link import ReplicationLink
from .server_azure_ad_administrator import ServerAzureADAdministrator
from .server_communication_link import ServerCommunicationLink
from .service_objective import ServiceObjective
from .check_name_availability_request import CheckNameAvailabilityRequest
from .check_name_availability_response import CheckNameAvailabilityResponse
from .recommended_elastic_pool_metric import RecommendedElasticPoolMetric
from .slo_usage_metric import SloUsageMetric
from .service_tier_advisor import ServiceTierAdvisor
from .transparent_data_encryption import TransparentDataEncryption
from .operation_impact import OperationImpact
from .recommended_index import RecommendedIndex
from .database import Database
from .recommended_elastic_pool import RecommendedElasticPool
from .elastic_pool import ElasticPool
from .elastic_pool_update import ElasticPoolUpdate
from .elastic_pool_activity import ElasticPoolActivity
from .elastic_pool_database_activity import ElasticPoolDatabaseActivity
from .database_update import DatabaseUpdate
from .transparent_data_encryption_activity import TransparentDataEncryptionActivity
from .server_usage import ServerUsage
from .database_usage import DatabaseUsage
from .database_blob_auditing_policy import DatabaseBlobAuditingPolicy
from .encryption_protector import EncryptionProtector
from .failover_group_read_write_endpoint import FailoverGroupReadWriteEndpoint
from .failover_group_read_only_endpoint import FailoverGroupReadOnlyEndpoint
from .partner_info import PartnerInfo
from .failover_group import FailoverGroup
from .failover_group_update import FailoverGroupUpdate
from .operation_display import OperationDisplay
from .operation import Operation
from .server_key import ServerKey
from .resource_identity import ResourceIdentity
from .server import Server
from .server_update import ServerUpdate
from .sync_agent import SyncAgent
from .sync_agent_key_properties import SyncAgentKeyProperties
from .sync_agent_linked_database import SyncAgentLinkedDatabase
from .sync_database_id_properties import SyncDatabaseIdProperties
from .sync_full_schema_table_column import SyncFullSchemaTableColumn
from .sync_full_schema_table import SyncFullSchemaTable
from .sync_full_schema_properties import SyncFullSchemaProperties
from .sync_group_log_properties import SyncGroupLogProperties
from .sync_group_schema_table_column import SyncGroupSchemaTableColumn
from .sync_group_schema_table import SyncGroupSchemaTable
from .sync_group_schema import SyncGroupSchema
from .sync_group import SyncGroup
from .sync_member import SyncMember
from .subscription_usage import SubscriptionUsage
from .virtual_network_rule import VirtualNetworkRule
from .database_operation import DatabaseOperation
from .resource_move_definition import ResourceMoveDefinition
from .server_dns_alias import ServerDnsAlias
from .server_dns_alias_acquisition import ServerDnsAliasAcquisition
from .backup_long_term_retention_policy_paged import BackupLongTermRetentionPolicyPaged
from .backup_long_term_retention_vault_paged import BackupLongTermRetentionVaultPaged
from .restore_point_paged import RestorePointPaged
from .recoverable_database_paged import RecoverableDatabasePaged
from .restorable_dropped_database_paged import RestorableDroppedDatabasePaged
from .data_masking_rule_paged import DataMaskingRulePaged
from .firewall_rule_paged import FirewallRulePaged
from .geo_backup_policy_paged import GeoBackupPolicyPaged
from .metric_paged import MetricPaged
from .metric_definition_paged import MetricDefinitionPaged
from .database_paged import DatabasePaged
from .elastic_pool_paged import ElasticPoolPaged
from .replication_link_paged import ReplicationLinkPaged
from .server_azure_ad_administrator_paged import ServerAzureADAdministratorPaged
from .server_communication_link_paged import ServerCommunicationLinkPaged
from .service_objective_paged import ServiceObjectivePaged
from .server_paged import ServerPaged
from .elastic_pool_activity_paged import ElasticPoolActivityPaged
from .elastic_pool_database_activity_paged import ElasticPoolDatabaseActivityPaged
from .recommended_elastic_pool_paged import RecommendedElasticPoolPaged
from .recommended_elastic_pool_metric_paged import RecommendedElasticPoolMetricPaged
from .service_tier_advisor_paged import ServiceTierAdvisorPaged
from .transparent_data_encryption_activity_paged import TransparentDataEncryptionActivityPaged
from .server_usage_paged import ServerUsagePaged
from .database_usage_paged import DatabaseUsagePaged
from .encryption_protector_paged import EncryptionProtectorPaged
from .failover_group_paged import FailoverGroupPaged
from .operation_paged import OperationPaged
from .server_key_paged import ServerKeyPaged
from .sync_agent_paged import SyncAgentPaged
from .sync_agent_linked_database_paged import SyncAgentLinkedDatabasePaged
from .sync_database_id_properties_paged import SyncDatabaseIdPropertiesPaged
from .sync_full_schema_properties_paged import SyncFullSchemaPropertiesPaged
from .sync_group_log_properties_paged import SyncGroupLogPropertiesPaged
from .sync_group_paged import SyncGroupPaged
from .sync_member_paged import SyncMemberPaged
from .subscription_usage_paged import SubscriptionUsagePaged
from .virtual_network_rule_paged import VirtualNetworkRulePaged
from .database_operation_paged import DatabaseOperationPaged
from .server_dns_alias_paged import ServerDnsAliasPaged
from .sql_management_client_enums import (
BackupLongTermRetentionPolicyState,
RestorePointType,
CapabilityStatus,
MaxSizeUnits,
PerformanceLevelUnit,
ServerConnectionType,
SecurityAlertPolicyState,
SecurityAlertPolicyEmailAccountAdmins,
SecurityAlertPolicyUseServerDefault,
DataMaskingState,
DataMaskingRuleState,
DataMaskingFunction,
GeoBackupPolicyState,
DatabaseEdition,
ServiceObjectiveName,
StorageKeyType,
AuthenticationType,
UnitType,
PrimaryAggregationType,
UnitDefinitionType,
ReplicationRole,
ReplicationState,
CheckNameAvailabilityReason,
ElasticPoolEdition,
CreateMode,
TransparentDataEncryptionStatus,
RecommendedIndexAction,
RecommendedIndexState,
RecommendedIndexType,
ReadScale,
SampleName,
ElasticPoolState,
TransparentDataEncryptionActivityStatus,
BlobAuditingPolicyState,
ServerKeyType,
ReadWriteEndpointFailoverPolicy,
ReadOnlyEndpointFailoverPolicy,
FailoverGroupReplicationRole,
OperationOrigin,
IdentityType,
SyncAgentState,
SyncMemberDbType,
SyncGroupLogType,
SyncConflictResolutionPolicy,
SyncGroupState,
SyncDirection,
SyncMemberState,
VirtualNetworkRuleState,
ManagementOperationState,
)
__all__ = [
'Resource',
'ProxyResource',
'BackupLongTermRetentionPolicy',
'BackupLongTermRetentionVault',
'TrackedResource',
'RestorePoint',
'RecoverableDatabase',
'RestorableDroppedDatabase',
'MaxSizeCapability',
'ServiceObjectiveCapability',
'EditionCapability',
'ElasticPoolPerDatabaseMinDtuCapability',
'ElasticPoolPerDatabaseMaxDtuCapability',
'ElasticPoolDtuCapability',
'ElasticPoolEditionCapability',
'ServerVersionCapability',
'LocationCapabilities',
'ServerConnectionPolicy',
'DatabaseSecurityAlertPolicy',
'DataMaskingPolicy',
'DataMaskingRule',
'FirewallRule',
'GeoBackupPolicy',
'ImportExtensionRequest',
'ImportExportResponse',
'ImportRequest',
'ExportRequest',
'MetricValue',
'MetricName',
'Metric',
'MetricAvailability',
'MetricDefinition',
'ReplicationLink',
'ServerAzureADAdministrator',
'ServerCommunicationLink',
'ServiceObjective',
'CheckNameAvailabilityRequest',
'CheckNameAvailabilityResponse',
'RecommendedElasticPoolMetric',
'SloUsageMetric',
'ServiceTierAdvisor',
'TransparentDataEncryption',
'OperationImpact',
'RecommendedIndex',
'Database',
'RecommendedElasticPool',
'ElasticPool',
'ElasticPoolUpdate',
'ElasticPoolActivity',
'ElasticPoolDatabaseActivity',
'DatabaseUpdate',
'TransparentDataEncryptionActivity',
'ServerUsage',
'DatabaseUsage',
'DatabaseBlobAuditingPolicy',
'EncryptionProtector',
'FailoverGroupReadWriteEndpoint',
'FailoverGroupReadOnlyEndpoint',
'PartnerInfo',
'FailoverGroup',
'FailoverGroupUpdate',
'OperationDisplay',
'Operation',
'ServerKey',
'ResourceIdentity',
'Server',
'ServerUpdate',
'SyncAgent',
'SyncAgentKeyProperties',
'SyncAgentLinkedDatabase',
'SyncDatabaseIdProperties',
'SyncFullSchemaTableColumn',
'SyncFullSchemaTable',
'SyncFullSchemaProperties',
'SyncGroupLogProperties',
'SyncGroupSchemaTableColumn',
'SyncGroupSchemaTable',
'SyncGroupSchema',
'SyncGroup',
'SyncMember',
'SubscriptionUsage',
'VirtualNetworkRule',
'DatabaseOperation',
'ResourceMoveDefinition',
'ServerDnsAlias',
'ServerDnsAliasAcquisition',
'BackupLongTermRetentionPolicyPaged',
'BackupLongTermRetentionVaultPaged',
'RestorePointPaged',
'RecoverableDatabasePaged',
'RestorableDroppedDatabasePaged',
'DataMaskingRulePaged',
'FirewallRulePaged',
'GeoBackupPolicyPaged',
'MetricPaged',
'MetricDefinitionPaged',
'DatabasePaged',
'ElasticPoolPaged',
'ReplicationLinkPaged',
'ServerAzureADAdministratorPaged',
'ServerCommunicationLinkPaged',
'ServiceObjectivePaged',
'ServerPaged',
'ElasticPoolActivityPaged',
'ElasticPoolDatabaseActivityPaged',
'RecommendedElasticPoolPaged',
'RecommendedElasticPoolMetricPaged',
'ServiceTierAdvisorPaged',
'TransparentDataEncryptionActivityPaged',
'ServerUsagePaged',
'DatabaseUsagePaged',
'EncryptionProtectorPaged',
'FailoverGroupPaged',
'OperationPaged',
'ServerKeyPaged',
'SyncAgentPaged',
'SyncAgentLinkedDatabasePaged',
'SyncDatabaseIdPropertiesPaged',
'SyncFullSchemaPropertiesPaged',
'SyncGroupLogPropertiesPaged',
'SyncGroupPaged',
'SyncMemberPaged',
'SubscriptionUsagePaged',
'VirtualNetworkRulePaged',
'DatabaseOperationPaged',
'ServerDnsAliasPaged',
'BackupLongTermRetentionPolicyState',
'RestorePointType',
'CapabilityStatus',
'MaxSizeUnits',
'PerformanceLevelUnit',
'ServerConnectionType',
'SecurityAlertPolicyState',
'SecurityAlertPolicyEmailAccountAdmins',
'SecurityAlertPolicyUseServerDefault',
'DataMaskingState',
'DataMaskingRuleState',
'DataMaskingFunction',
'GeoBackupPolicyState',
'DatabaseEdition',
'ServiceObjectiveName',
'StorageKeyType',
'AuthenticationType',
'UnitType',
'PrimaryAggregationType',
'UnitDefinitionType',
'ReplicationRole',
'ReplicationState',
'CheckNameAvailabilityReason',
'ElasticPoolEdition',
'CreateMode',
'TransparentDataEncryptionStatus',
'RecommendedIndexAction',
'RecommendedIndexState',
'RecommendedIndexType',
'ReadScale',
'SampleName',
'ElasticPoolState',
'TransparentDataEncryptionActivityStatus',
'BlobAuditingPolicyState',
'ServerKeyType',
'ReadWriteEndpointFailoverPolicy',
'ReadOnlyEndpointFailoverPolicy',
'FailoverGroupReplicationRole',
'OperationOrigin',
'IdentityType',
'SyncAgentState',
'SyncMemberDbType',
'SyncGroupLogType',
'SyncConflictResolutionPolicy',
'SyncGroupState',
'SyncDirection',
'SyncMemberState',
'VirtualNetworkRuleState',
'ManagementOperationState',
]
| |
__author__ = 'Ostico <ostico@gmail.com>'
import sys
import os
import unittest
from pyorient.exceptions import *
from pyorient import OrientSocket
from pyorient import OrientRecord
from pyorient.messages.database import *
from pyorient.messages.commands import *
from pyorient.messages.cluster import *
from pyorient.messages.records import *
from pyorient.constants import DB_TYPE_DOCUMENT, QUERY_SYNC, \
STORAGE_TYPE_PLOCAL, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY
# Silence pyorient's debug output for the test run.
os.environ['DEBUG'] = "0"
os.environ['DEBUG_VERBOSE'] = "0"
# Make the package importable whether tests run from the repo root or from
# the tests directory.
if os.path.realpath( '../' ) not in sys.path:
    sys.path.insert( 0, os.path.realpath( '../' ) )
if os.path.realpath( '.' ) not in sys.path:
    sys.path.insert( 0, os.path.realpath( '.' ) )
class RawMessages_2_TestCase(unittest.TestCase):
    """ Command Test Case """
    # NOTE(review): these tests need a live OrientDB server on localhost:2424
    # with the "GratefulDeadConcerts" demo database and a root/root account.

    def test_record_object(self):
        """A freshly constructed OrientRecord carries no metadata."""
        x = OrientRecord()
        assert x._rid is None
        assert x._version is None
        assert x._class is None

    def test_record_load(self):
        """Load record #11:0 with a fetch plan and check its metadata."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1
        # ##################
        msg = DbOpenMessage( connection )
        db_name = "GratefulDeadConcerts"
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0

        def _test_callback(record):
            # BUG FIX: `record is not []` compared identity against a fresh
            # list literal and was always True; compare by value instead.
            assert record != []
            assert record._rid is not None  # assert no exception

        req_msg = RecordLoadMessage( connection )
        res = req_msg.prepare( [ "#11:0", "*:2", _test_callback ] ) \
            .send().fetch_response()
        assert res._rid == "#11:0"
        assert res._class == 'followed_by'
        assert res._in != 0
        assert res._out != 0

    def test_record_count_with_no_opened_db(self):
        """Counting records without an opened database must raise."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1
        # ##################
        conn_msg = ConnectMessage( connection )
        session_id = conn_msg.prepare( ("root", "root") )\
            .send().fetch_response()
        assert session_id == connection.session_id
        assert session_id != -1
        try:
            count_msg = DbCountRecordsMessage( connection )
            res = count_msg.prepare().send().fetch_response()
            assert False  # we expect an exception because we need a db opened
        except PyOrientDatabaseException:
            assert True

    def test_record_count(self):
        """Record count on an opened database is a positive number."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1
        # ##################
        msg = DbOpenMessage( connection )
        db_name = "GratefulDeadConcerts"
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0
        session_id = connection.session_id
        assert session_id != -1
        count_msg = DbCountRecordsMessage( connection )
        res = count_msg.prepare().send().fetch_response()
        # BUG FIX: `res is not 0` is an identity test (always True for a
        # non-interned object); use a value comparison.
        assert res != 0
        assert res > 0

    def test_record_create_update(self):
        """Create a record in a graph db, update it, and read it back."""
        connection = OrientSocket( "localhost", 2424 )
        conn_msg = ConnectMessage( connection )
        assert connection.protocol != -1
        session_id = conn_msg.prepare( ("root", "root") ) \
            .send().fetch_response()
        assert session_id == connection.session_id
        assert session_id != -1
        # ##################
        db_name = "my_little_test"
        msg = DbExistsMessage( connection )
        exists = msg.prepare( [db_name] ).send().fetch_response()
        print("Before %r" % exists)
        try:
            ( DbDropMessage( connection ) ).prepare([db_name]) \
                .send().fetch_response()
            assert True
        except PyOrientCommandException as e:
            print(str(e))
        finally:
            ( DbCreateMessage( connection ) ).prepare(
                (db_name, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY)
            ).send().fetch_response()
        msg = DbOpenMessage( connection )
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_GRAPH, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0
        try:
            create_class = CommandMessage(connection)
            # NOTE(review): if this raises, `cluster` stays unbound and the
            # RecordCreateMessage below fails with NameError. The database is
            # recreated fresh above, so the class should never pre-exist.
            cluster = create_class.prepare((QUERY_CMD, "create class my_class "
                                                       "extends V"))\
                .send().fetch_response()[0]
        except PyOrientCommandException:
            # class my_class already exists
            pass
        # classes are not allowed in record create/update/load
        rec = { '@my_class': { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' } }
        rec_position = ( RecordCreateMessage(connection) )\
            .prepare( ( cluster, rec ) )\
            .send().fetch_response()
        print("New Rec Position: %s" % rec_position._rid)
        assert rec_position._rid is not None
        rec = { '@my_class': { 'alloggio': 'albergo', 'lavoro': 'ufficio', 'vacanza': 'montagna' } }
        update_success = ( RecordUpdateMessage(connection) )\
            .prepare( ( cluster, rec_position._rid, rec ) )\
            .send().fetch_response()
        assert update_success[0] != 0
        if connection.protocol <= 21:
            # BUG FIX: the original `return unittest.skip(...)` only built a
            # decorator and silently PASSED the test; raise SkipTest via the
            # TestCase helper so the run is reported as skipped.
            self.skipTest("Protocol {!r} does not works well".format(
                connection.protocol ))
        res = ( CommandMessage( connection ) )\
            .prepare( [ QUERY_SYNC, "select from " + rec_position._rid ] )\
            .send().fetch_response()
        # res = [ ( RecordLoadMessage(connection) ).prepare(
        #     [ rec_position._rid ]
        # ).send().fetch_response() ]
        print("%r" % res[0]._rid)
        print("%r" % res[0]._class)
        print("%r" % res[0]._version)
        print("%r" % res[0].alloggio)
        print("%r" % res[0].lavoro)
        print("%r" % res[0].vacanza)
        assert res[0]._rid == '#11:0'
        assert res[0]._class == 'my_class'
        assert res[0]._version >= 0
        assert res[0].alloggio == 'albergo'
        assert res[0].lavoro == 'ufficio'
        assert res[0].vacanza == 'montagna'
        # reconnect as root so we are allowed to drop the database
        sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
            .send().fetch_response()
        # at the end drop the test database
        ( DbDropMessage( connection ) ).prepare([db_name]) \
            .send().fetch_response()

    def test_record_delete(self):
        """Create a record, verify it, delete it, then fail on a bogus rid."""
        connection = OrientSocket( "localhost", 2424 )
        conn_msg = ConnectMessage( connection )
        assert connection.protocol != -1
        session_id = conn_msg.prepare( ("root", "root") ) \
            .send().fetch_response()
        print("Sid: %s" % session_id)
        assert session_id == connection.session_id
        assert session_id != -1
        db_name = "my_little_test"
        msg = DbExistsMessage( connection )
        exists = msg.prepare( [db_name] ).send().fetch_response()
        print("Before %r" % exists)
        try:
            ( DbDropMessage( connection ) ).prepare([db_name]) \
                .send().fetch_response()
            assert True
        except PyOrientCommandException as e:
            print(str(e))
        finally:
            ( DbCreateMessage( connection ) ).prepare(
                (db_name, DB_TYPE_DOCUMENT, STORAGE_TYPE_MEMORY)
            ).send().fetch_response()
        msg = DbOpenMessage( connection )
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        assert len(cluster_info) != 0
        rec = { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' }
        rec_position = ( RecordCreateMessage(connection) )\
            .prepare( ( 1, rec ) )\
            .send().fetch_response()
        print("New Rec Position: %s" % rec_position._rid)
        assert rec_position._rid is not None
        ######################## Check Success
        res = ( CommandMessage( connection ) )\
            .prepare( [ QUERY_SYNC, "select from " + str(rec_position._rid) ] )\
            .send().fetch_response()
        import re
        assert re.match( '#1:[0-9]', res[0]._rid )
        assert res[0]._class is None
        assert res[0]._version >= 0
        assert res[0].alloggio == 'casa'
        assert res[0].lavoro == 'ufficio'
        assert res[0].vacanza == 'mare'
        ######################## Delete Rid
        del_msg = (RecordDeleteMessage(connection))
        deletion = del_msg.prepare( ( 1, rec_position._rid ) )\
            .send().fetch_response()
        assert deletion is True
        # now try a failure in deletion for wrong rid
        del_msg = (RecordDeleteMessage(connection))
        deletion = del_msg.prepare( ( 1, 11111 ) )\
            .send().fetch_response()
        assert deletion is False
        # reconnect as root so we are allowed to drop the database
        sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
            .send().fetch_response()
        # at the end drop the test database
        ( DbDropMessage( connection ) ).prepare([db_name]) \
            .send().fetch_response()

    def test_data_cluster_count(self):
        """The three ways of counting cluster records must agree."""
        connection = OrientSocket( "localhost", 2424 )
        assert connection.session_id == -1
        # ##################
        msg = DbOpenMessage( connection )
        db_name = "GratefulDeadConcerts"
        cluster_info = msg.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        print(cluster_info)
        assert len(cluster_info) != 0
        assert connection.session_id != -1
        count_msg = DataClusterCountMessage( connection )
        res1 = count_msg.set_count_tombstones(1)\
            .prepare( [ (0,1,2,3,4,5) ] ).send().fetch_response()
        # BUG FIX: `is not 0` identity tests replaced with value comparisons
        # here and below.
        assert res1 != 0
        assert res1 > 0
        count_msg = DataClusterCountMessage( connection )
        res2 = count_msg.set_count_tombstones(1)\
            .prepare( [ (0,1,2,3,4,5), 1 ] ).send().fetch_response()
        assert res2 != 0
        assert res2 > 0
        count_msg = DataClusterCountMessage( connection )
        res3 = count_msg.set_count_tombstones(1).set_cluster_ids( (0,1,2,3,4,5) )\
            .prepare().send().fetch_response()
        assert res3 != 0
        assert res3 > 0
        assert res1 == res2
        assert res3 == res2
        assert res3 == res1

    def test_query_async(self):
        """An async query delivers records via the callback, returns None."""
        connection = OrientSocket( 'localhost', 2424 )
        open_msg = DbOpenMessage(connection)
        open_msg.set_db_name('GratefulDeadConcerts')\
            .set_user('admin').set_pass('admin').prepare()\
            .send().fetch_response()

        def _test_callback(record):
            # BUG FIX: `record is not []` was always True; compare by value.
            assert record != []
            assert record._rid is not None  # assert no exception

        try_select_async = CommandMessage(connection)
        try_select_async.set_command_type(QUERY_ASYNC)\
            .set_query("select from followed_by")\
            .set_limit(50)\
            .set_fetch_plan("*:0")\
            .set_callback( _test_callback )\
            .prepare()
        # BUG FIX: the original had a stray trailing backslash after
        # `.prepare()`, gluing it onto the next assignment (SyntaxError).
        response = try_select_async.send().fetch_response()
        assert response is None

    def test_wrong_data_range(self):
        """Requesting the data range of a bogus cluster id must raise."""
        connection = OrientSocket( 'localhost', 2424 )
        db_name = "GratefulDeadConcerts"
        db = DbOpenMessage(connection)
        cluster_info = db.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        datarange = DataClusterDataRangeMessage(connection)
        try:
            value = datarange.prepare(32767).send().fetch_response()
        except PyOrientCommandException as e:
            print(repr(str(e)))
            assert "IndexOutOfBoundsException" in str(e)

    def test_data_range(self):
        """Every cluster reports a usable data range."""
        connection = OrientSocket( 'localhost', 2424 )
        db_name = "GratefulDeadConcerts"
        db = DbOpenMessage(connection)
        cluster_info = db.prepare(
            (db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
        ).send().fetch_response()
        cluster_info.dataClusters.sort(key=lambda cluster: cluster['id'])
        for cluster in cluster_info:
            # os.environ['DEBUG'] = '0'  # silence debug
            datarange = DataClusterDataRangeMessage(connection)
            value = datarange.prepare(cluster['id']).send().fetch_response()
            print("Cluster Name: %s, ID: %u: %s " \
                  % ( cluster['name'], cluster['id'], value ))
            # BUG FIX: `value is not []` was always True; compare by value.
            assert value != []
            assert value is not None
# x = RawMessages_2_TestCase('test_wrong_data_range').run()
| |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from robot.errors import ExecutionFailed, DataError, PassExecution
from robot.model import SuiteVisitor
from robot.result import TestSuite, Result
from robot.variables import GLOBAL_VARIABLES
from robot.utils import get_timestamp, NormalizedDict
from .context import EXECUTION_CONTEXTS
from .keywords import Keywords, Keyword
from .namespace import Namespace
from .status import SuiteStatus, TestStatus
from .timeouts import TestTimeout
# TODO: Some 'extract method' love needed here. Perhaps even 'extract class'.
class Runner(SuiteVisitor):
    """Executes a suite model and builds the corresponding result model.

    Walks the executable suite structure as a ``SuiteVisitor``: for each
    suite it creates the matching result suite, sets up the execution
    context and namespace, and runs setups, tests and teardowns while
    recording statuses.
    """

    def __init__(self, output, settings):
        self.result = None             # Root Result object; created lazily.
        self._output = output
        self._settings = settings
        self._suite = None             # Result suite currently being built.
        self._suite_status = None
        self._executed_tests = None    # Tracks names to warn on duplicates.

    @property
    def _context(self):
        # Execution context of the innermost suite currently being run.
        return EXECUTION_CONTEXTS.current

    @property
    def _variables(self):
        # May be None before the first suite has been started.
        ctx = self._context
        return ctx.variables if ctx else None

    def start_suite(self, suite):
        """Enter ``suite``: build its result suite, namespace, and context,
        then run the suite setup."""
        variables = GLOBAL_VARIABLES.copy()
        variables.set_from_variable_table(suite.variables)
        result = TestSuite(source=suite.source,
                           name=suite.name,
                           doc=suite.doc,
                           metadata=suite.metadata,
                           starttime=get_timestamp())
        if not self.result:
            # Root suite: wrap it into the overall Result object.
            result.set_criticality(self._settings.critical_tags,
                                   self._settings.non_critical_tags)
            self.result = Result(root_suite=result)
            self.result.configure(status_rc=self._settings.status_rc,
                                  stat_config=self._settings.statistics_config)
        else:
            self._suite.suites.append(result)
        ns = Namespace(result, variables, self._variables,
                       suite.user_keywords, suite.imports)
        EXECUTION_CONTEXTS.start_suite(ns, self._output, self._settings.dry_run)
        self._context.set_suite_variables(result)
        if not (self._suite_status and self._suite_status.failures):
            # Imports are skipped when a parent suite has already failed.
            ns.handle_imports()
        variables.resolve_delayed()
        result.doc = self._resolve_setting(result.doc)
        result.metadata = [(self._resolve_setting(n), self._resolve_setting(v))
                           for n, v in result.metadata.items()]
        self._context.set_suite_variables(result)
        self._suite = result
        self._suite_status = SuiteStatus(self._suite_status,
                                         self._settings.exit_on_failure,
                                         self._settings.skip_teardown_on_exit)
        self._output.start_suite(ModelCombiner(suite, self._suite))
        self._run_setup(suite.keywords.setup, self._suite_status)
        self._executed_tests = NormalizedDict(ignore='_')

    def _resolve_setting(self, value):
        # Settings are resolved best-effort; variable errors are ignored.
        return self._variables.replace_string(value, ignore_errors=True)

    def end_suite(self, suite):
        """Leave ``suite``: run its teardown and finalize the result suite."""
        self._suite.message = self._suite_status.message
        self._context.report_suite_status(self._suite.status,
                                          self._suite.full_message)
        with self._context.suite_teardown():
            failure = self._run_teardown(suite.keywords.teardown, self._suite_status)
            if failure:
                self._suite.suite_teardown_failed(unicode(failure))
        self._suite.endtime = get_timestamp()
        self._suite.message = self._suite_status.message
        self._context.end_suite(self._suite)
        # Pop back to the parent suite/status for the enclosing scope.
        self._suite = self._suite.parent
        self._suite_status = self._suite_status.parent

    def visit_test(self, test):
        """Run a single test case: setup, keywords, teardown, and status
        bookkeeping into the result model."""
        if test.name in self._executed_tests:
            self._output.warn("Multiple test cases with name '%s' executed in "
                              "test suite '%s'." % (test.name, self._suite.longname))
        self._executed_tests[test.name] = True
        result = self._suite.tests.create(name=test.name,
                                          doc=self._resolve_setting(test.doc),
                                          tags=test.tags,
                                          starttime=get_timestamp(),
                                          timeout=self._get_timeout(test))
        keywords = Keywords(test.keywords.normal, bool(test.template))
        status = TestStatus(self._suite_status)
        if not status.failures and not test.name:
            status.test_failed('Test case name cannot be empty.', result.critical)
        if not status.failures and not keywords:
            status.test_failed('Test case contains no keywords.', result.critical)
        try:
            result.tags = self._context.variables.replace_list(result.tags)
        except DataError, err:
            status.test_failed('Replacing variables from test tags failed: %s'
                               % unicode(err), result.critical)
        self._context.start_test(result)
        self._output.start_test(ModelCombiner(result, test))
        self._run_setup(test.keywords.setup, status, result)
        try:
            if not status.failures:
                keywords.run(self._context)
        except PassExecution, exception:
            # 'Pass Execution' stops the test; earlier failures still fail it.
            err = exception.earlier_failures
            if err:
                status.test_failed(err, result.critical)
            else:
                result.message = exception.message
        except ExecutionFailed, err:
            status.test_failed(err, result.critical)
            if err.timeout:
                self._context.timeout_occurred = True
        result.status = status.status
        result.message = status.message or result.message
        if status.teardown_allowed:
            with self._context.test_teardown(result):
                self._run_teardown(test.keywords.teardown, status, result)
        if not status.failures and result.timeout and result.timeout.timed_out():
            # Timeout may only be noticed after the keywords have finished.
            status.test_failed(result.timeout.get_message(), result.critical)
            result.message = status.message
        result.status = status.status
        result.endtime = get_timestamp()
        self._output.end_test(ModelCombiner(result, test))
        self._context.end_test(result)

    def _get_timeout(self, test):
        # Returns a started TestTimeout, or None if the test has no timeout.
        if not test.timeout:
            return None
        timeout = TestTimeout(test.timeout.value, test.timeout.message,
                              self._variables)
        timeout.start()
        return timeout

    def _run_setup(self, setup, status, result=None):
        # Setup is only executed if nothing has failed yet.
        if not status.failures:
            exception = self._run_setup_or_teardown(setup, 'setup')
            status.setup_executed(exception)
            if result and isinstance(exception, PassExecution):
                result.message = exception.message

    def _run_teardown(self, teardown, status, result=None):
        # Returns the failure (if any); PassExecution is not a failure.
        if status.teardown_allowed:
            exception = self._run_setup_or_teardown(teardown, 'teardown')
            status.teardown_executed(exception)
            failed = not isinstance(exception, PassExecution)
            if result and exception:
                result.message = status.message if failed else exception.message
            return exception if failed else None

    def _run_setup_or_teardown(self, data, kw_type):
        """Run a setup/teardown keyword; return the raised error or None."""
        if not data:
            return None
        try:
            name = self._variables.replace_string(data.name)
        except DataError, err:
            return err
        if name.upper() in ('', 'NONE'):
            # Empty name or literal NONE disables the setup/teardown.
            return None
        kw = Keyword(name, data.args, type=kw_type)
        try:
            kw.run(self._context)
        except ExecutionFailed, err:
            if err.timeout:
                self._context.timeout_occurred = True
            return err
class ModelCombiner(object):
    """Combines several model objects into one read-only facade.

    Attribute lookups are delegated to the first wrapped model that has
    the requested attribute; AttributeError is raised if none do.
    """

    def __init__(self, *models):
        self.models = models

    def __getattr__(self, name):
        owners = [model for model in self.models if hasattr(model, name)]
        if owners:
            return getattr(owners[0], name)
        raise AttributeError(name)
| |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from jacket.api.compute.openstack.compute import availability_zone as az_v21
from jacket.api.compute.openstack.compute import extension_info
from jacket.api.compute.openstack.compute.legacy_v2.contrib import availability_zone \
as az_v2
from jacket.api.compute.openstack.compute.legacy_v2 import servers as servers_v2
from jacket.api.compute.openstack.compute import servers as servers_v21
from jacket.api.compute.openstack import extensions
from jacket.compute import availability_zones
from jacket.compute.cloud import api as compute_api
from jacket.compute.cloud import flavors
from jacket import context
from jacket.db import compute
from jacket.compute import exception
from jacket.compute import servicegroup
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit.image import fake
from jacket.tests.compute.unit import matchers
from jacket.tests.compute.unit.objects import test_service
from oslo_config import cfg
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, disabled=None):
    """Canned replacement for service_get_all().

    Returns three fake service rows; ``disabled`` selects between the
    disabled and the enabled data set.
    """
    def _make(binary, zone, created_at, updated_at, host, is_disabled):
        # One fake service row layered on top of the shared fixture dict.
        return dict(test_service.fake_service,
                    binary=binary,
                    availability_zone=zone,
                    available_zones=zone,
                    created_at=created_at,
                    updated_at=updated_at,
                    host=host,
                    disabled=is_disabled)

    if disabled:
        rows = [("compute-compute", "zone-2",
                 datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("compute-scheduler", "internal",
                 datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("compute-network", "internal",
                 datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
                 "fake_host-2")]
        flag = True
    else:
        rows = [("compute-compute", "zone-1",
                 datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("compute-sched", "internal",
                 datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
                 "fake_host-1"),
                ("compute-network", "internal",
                 datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
                 datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
                 "fake_host-2")]
        flag = False
    return [_make(binary, zone, created, updated, host, flag)
            for binary, zone, created, updated, host in rows]
def fake_service_is_up(self, service):
    """Report every fake service as up except the network service."""
    is_network = service['binary'] == u"compute-network"
    return not is_network
def fake_set_availability_zones(context, services):
    """Identity stub: hand the service list back untouched."""
    return services
def fake_get_availability_zones(context):
    """Stub: one available 'compute' zone and no unavailable zones."""
    available_zones = ['compute']
    unavailable_zones = []
    return available_zones, unavailable_zones
CONF = cfg.CONF
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
    """Tests for the v2.1 availability-zone API controller.

    Services, zone assignment and service liveness are all replaced by the
    module-level fake_* stubs above.
    """

    # Subclasses override this to exercise another API version.
    availability_zone = az_v21

    def setUp(self):
        super(AvailabilityZoneApiTestV21, self).setUp()
        availability_zones.reset_cache()
        self.stub_out('compute.compute.service_get_all', fake_service_get_all)
        self.stubs.Set(availability_zones, 'set_availability_zones',
                       fake_set_availability_zones)
        self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
        self.controller = self.availability_zone.AvailabilityZoneController()
        self.req = fakes.HTTPRequest.blank('')

    def test_filtered_availability_zones(self):
        # The 'internal' zone must be filtered out; 'hosts' is None in the
        # filtered (index) representation.
        zones = ['zone1', 'internal']
        expected = [{'zoneName': 'zone1',
                     'zoneState': {'available': True},
                     "hosts": None}]
        result = self.controller._get_filtered_availability_zones(zones, True)
        self.assertEqual(result, expected)
        expected = [{'zoneName': 'zone1',
                     'zoneState': {'available': False},
                     "hosts": None}]
        result = self.controller._get_filtered_availability_zones(zones,
                                                                  False)
        self.assertEqual(result, expected)

    def test_availability_zone_index(self):
        # Index lists available zones first, then unavailable ones.
        resp_dict = self.controller.index(self.req)
        self.assertIn('availabilityZoneInfo', resp_dict)
        zones = resp_dict['availabilityZoneInfo']
        self.assertEqual(len(zones), 2)
        self.assertEqual(zones[0]['zoneName'], u'zone-1')
        self.assertTrue(zones[0]['zoneState']['available'])
        self.assertIsNone(zones[0]['hosts'])
        self.assertEqual(zones[1]['zoneName'], u'zone-2')
        self.assertFalse(zones[1]['zoneState']['available'])
        self.assertIsNone(zones[1]['hosts'])

    def test_availability_zone_detail(self):
        # Detail additionally reports per-host service state; the network
        # service is reported unavailable by fake_service_is_up.
        resp_dict = self.controller.detail(self.req)
        self.assertIn('availabilityZoneInfo', resp_dict)
        zones = resp_dict['availabilityZoneInfo']
        self.assertEqual(len(zones), 3)
        timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
        nova_network_timestamp = iso8601.parse_date("2012-12-26T14:45:24Z")
        expected = [{'zoneName': 'zone-1',
                     'zoneState': {'available': True},
                     'hosts': {'fake_host-1': {
                         'compute-compute': {'active': True, 'available': True,
                                             'updated_at': timestamp}}}},
                    {'zoneName': 'internal',
                     'zoneState': {'available': True},
                     'hosts': {'fake_host-1': {
                         'compute-sched': {'active': True, 'available': True,
                                           'updated_at': timestamp}},
                         'fake_host-2': {
                             'compute-network': {
                                 'active': True,
                                 'available': False,
                                 'updated_at': nova_network_timestamp}}}},
                    {'zoneName': 'zone-2',
                     'zoneState': {'available': False},
                     'hosts': None}]
        self.assertEqual(expected, zones)

    def test_availability_zone_detail_no_services(self):
        expected_response = {'availabilityZoneInfo':
                                 [{'zoneState': {'available': True},
                                   'hosts': {},
                                   'zoneName': 'compute'}]}
        self.stubs.Set(availability_zones, 'get_availability_zones',
                       fake_get_availability_zones)
        resp_dict = self.controller.detail(self.req)
        self.assertThat(resp_dict,
                        matchers.DictMatches(expected_response))
class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
    """Repeats the v2.1 availability-zone tests against the legacy v2 API."""

    availability_zone = az_v2

    def setUp(self):
        super(AvailabilityZoneApiTestV2, self).setUp()
        # The legacy v2 'detail' action requires an admin context.
        self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.non_admin_req = fakes.HTTPRequest.blank('')

    def test_availability_zone_detail_with_non_admin(self):
        # Non-admin callers must be rejected by the v2 API.
        self.assertRaises(exception.AdminRequired,
                          self.controller.detail, self.non_admin_req)
class ServersControllerCreateTestV21(test.TestCase):
    """Tests that server create handles the availability_zone parameter."""

    base_url = '/v2/fake/'

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestV21, self).setUp()
        self.instance_cache_num = 0
        self._set_up_controller()

        def instance_create(context, inst):
            # Fake DB create: fabricate an instance record from the request.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': inst_type,
                'access_ip_v4': '1.2.3.4',
                'access_ip_v6': 'fead::1234',
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'availability_zone': 'compute',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "root_device_name": inst.get('root_device_name', 'vda'),
            })
            return instance

        fake.stub_out_image_service(self)
        self.stub_out('compute.compute.instance_create', instance_create)
        self.req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Two controllers: one normal, one with os-availability-zone
        # blacklisted so the parameter should be ignored.
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers_v21.ServersController(
            extension_info=ext_info)
        CONF.set_override('extensions_blacklist',
                          'os-availability-zone',
                          'osapi_v21')
        self.no_availability_zone_controller = servers_v21.ServersController(
            extension_info=ext_info)

    def _test_create_extra(self, params, controller):
        # Issue a create request with extra body params merged in.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        server.update(params)
        body = dict(server=server)
        server = controller.create(self.req, body=body).obj['server']

    def test_create_instance_with_availability_zone_disabled(self):
        params = {'availability_zone': 'foo'}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            # With the extension blacklisted the zone must be dropped.
            self.assertIsNone(kwargs['availability_zone'])
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params, self.no_availability_zone_controller)

    def _create_instance_with_availability_zone(self, zone_name):
        # Builds a create request for zone_name and registers a fake
        # 'compute' zone (service + aggregate) in the DB.
        def create(*args, **kwargs):
            self.assertIn('availability_zone', kwargs)
            self.assertEqual('compute', kwargs['availability_zone'])
            return old_create(*args, **kwargs)

        old_create = compute_api.API.create
        self.stubs.Set(compute_api.API, 'create', create)
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'availability_zone': zone_name,
            },
        }

        admin_context = context.get_admin_context()
        compute.service_create(admin_context, {'host': 'host1_zones',
                                               'binary': "compute-compute",
                                               'topic': 'compute',
                                               'report_count': 0})
        agg = compute.aggregate_create(admin_context,
                                       {'name': 'agg1'}, {'availability_zone': 'compute'})
        compute.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
        return self.req, body

    def test_create_instance_with_availability_zone(self):
        zone_name = 'compute'
        req, body = self._create_instance_with_availability_zone(zone_name)
        res = self.controller.create(req, body=body).obj
        server = res['server']
        self.assertEqual(fakes.FAKE_UUID, server['id'])

    def test_create_instance_with_invalid_availability_zone_too_long(self):
        zone_name = 'a' * 256
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_instance_with_invalid_availability_zone_too_short(self):
        zone_name = ''
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_instance_with_invalid_availability_zone_not_str(self):
        zone_name = 111
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_instance_without_availability_zone(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }
        res = self.controller.create(self.req, body=body).obj
        server = res['server']
        self.assertEqual(fakes.FAKE_UUID, server['id'])
class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
    """Runs the create tests against the legacy v2 servers controller."""

    def _set_up_controller(self):
        # Legacy controllers are configured through an ExtensionManager
        # rather than an extensions blacklist.
        ext_mgr = extensions.ExtensionManager()
        ext_mgr.extensions = {'os-availability-zone': 'fake'}
        self.controller = servers_v2.Controller(ext_mgr)
        ext_mgr_no_az = extensions.ExtensionManager()
        ext_mgr_no_az.extensions = {}
        self.no_availability_zone_controller = servers_v2.Controller(
            ext_mgr_no_az)

    def test_create_instance_with_invalid_availability_zone_too_long(self):
        # NOTE: v2.0 API does not check this bad request case.
        # So we skip this test for v2.0 API.
        pass

    def test_create_instance_with_invalid_availability_zone_too_short(self):
        # NOTE: v2.0 API does not check this bad request case.
        # So we skip this test for v2.0 API.
        pass

    def test_create_instance_with_invalid_availability_zone_not_str(self):
        # NOTE: v2.0 API does not check this bad request case.
        # So we skip this test for v2.0 API.
        pass
| |
#------------------------------------------------------------------------------
# Copyright (c) 2008, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
# However, when used with the GPL version of PyQt the additional terms described in the PyQt GPL exception also apply
#
# Author: Riverbank Computing Limited
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
# Standard library imports.
import logging
# Major package imports.
from pyface.qt import QtCore, QtGui
# Enthought library imports.
from traits.api import Instance, on_trait_change
# Local imports.
from split_tab_widget import SplitTabWidget
from pyface.message_dialog import error
from pyface.workbench.i_workbench_window_layout import \
MWorkbenchWindowLayout
# Logging.
logger = logging.getLogger(__name__)
# For mapping positions relative to the editor area.
_EDIT_AREA_MAP = {
    'left': QtCore.Qt.LeftDockWidgetArea,
    'right': QtCore.Qt.RightDockWidgetArea,
    'top': QtCore.Qt.TopDockWidgetArea,
    'bottom': QtCore.Qt.BottomDockWidgetArea
}

# For mapping positions relative to another view.  Each value is a
# (split orientation, swap) pair; when swap is true the newly added dock
# widget is moved before the existing one after splitting (see
# WorkbenchWindowLayout._qt4_add_view).
_VIEW_AREA_MAP = {
    'left': (QtCore.Qt.Horizontal, True),
    'right': (QtCore.Qt.Horizontal, False),
    'top': (QtCore.Qt.Vertical, True),
    'bottom': (QtCore.Qt.Vertical, False)
}
class WorkbenchWindowLayout(MWorkbenchWindowLayout):
""" The Qt4 implementation of the workbench window layout interface.
See the 'IWorkbenchWindowLayout' interface for the API documentation.
"""
#### Private interface ####################################################
# The widget that provides the editor area. We keep (and use) this
# separate reference because we can't always assume that it has been set to
# be the main window's central widget.
_qt4_editor_area = Instance(SplitTabWidget)
###########################################################################
# 'IWorkbenchWindowLayout' interface.
###########################################################################
def activate_editor(self, editor):
if editor.control is not None:
editor.control.show()
self._qt4_editor_area.setCurrentWidget(editor.control)
editor.set_focus()
return editor
def activate_view(self, view):
# FIXME v3: This probably doesn't work as expected.
view.control.raise_()
view.set_focus()
return view
def add_editor(self, editor, title):
if editor is None:
return None
try:
self._qt4_editor_area.addTab(self._qt4_get_editor_control(editor), title)
if editor._loading_on_open:
self._qt4_editor_tab_spinner(editor, '', True)
except Exception:
logger.exception('error creating editor control [%s]', editor.id)
return editor
def add_view(self, view, position=None, relative_to=None, size=(-1, -1)):
if view is None:
return None
try:
self._qt4_add_view(view, position, relative_to, size)
view.visible = True
except Exception:
logger.exception('error creating view control [%s]', view.id)
# Even though we caught the exception, it sometimes happens that
# the view's control has been created as a child of the application
# window (or maybe even the dock control). We should destroy the
# control to avoid bad UI effects.
view.destroy_control()
# Additionally, display an error message to the user.
error(self.window.control, 'Unable to add view [%s]' % view.id,
'Workbench Plugin Error')
return view
def close_editor(self, editor):
if editor.control is not None:
editor.control.close()
return editor
def close_view(self, view):
self.hide_view(view)
return view
def close(self):
# Don't fire signals for editors that have destroyed their controls.
QtCore.QObject.disconnect(self._qt4_editor_area,
QtCore.SIGNAL('hasFocus'), self._qt4_editor_focus)
self._qt4_editor_area.clear()
# Delete all dock widgets.
for v in self.window.views:
if self.contains_view(v):
self._qt4_delete_view_dock_widget(v)
def create_initial_layout(self, parent):
self._qt4_editor_area = editor_area = SplitTabWidget(parent)
QtCore.QObject.connect(editor_area, QtCore.SIGNAL('hasFocus'),
self._qt4_editor_focus)
# We are interested in focus changes but we get them from the editor
# area rather than qApp to allow the editor area to restrict them when
# needed.
QtCore.QObject.connect(
editor_area, QtCore.SIGNAL('focusChanged(QWidget *,QWidget *)'),
self._qt4_view_focus_changed)
QtCore.QObject.connect(self._qt4_editor_area,
QtCore.SIGNAL('tabTextChanged(QWidget *, QString)'),
self._qt4_editor_title_changed)
editor_area.new_window_request.connect(self._qt4_new_window_request)
editor_area.tab_close_request.connect(self._qt4_tab_close_request)
editor_area.tab_window_changed.connect(self._qt4_tab_window_changed)
return editor_area
def contains_view(self, view):
return hasattr(view, '_qt4_dock')
def hide_editor_area(self):
self._qt4_editor_area.hide()
def hide_view(self, view):
view._qt4_dock.hide()
view.visible = False
return view
def refresh(self):
# Nothing to do.
pass
def reset_editors(self):
self._qt4_editor_area.setCurrentIndex(0)
def reset_views(self):
# Qt doesn't provide information about the order of dock widgets in a
# dock area.
pass
def show_editor_area(self):
self._qt4_editor_area.show()
def show_view(self, view):
view._qt4_dock.show()
view.visible = True
#### Methods for saving and restoring the layout ##########################
def get_view_memento(self):
# Get the IDs of the views in the main window. This information is
# also in the QMainWindow state, but that is opaque.
view_ids = [v.id for v in self.window.views if self.contains_view(v)]
# Everything else is provided by QMainWindow.
state = str(self.window.control.saveState())
return (0, (view_ids, state))
def set_view_memento(self, memento):
version, mdata = memento
# There has only ever been version 0 so far so check with an assert.
assert version == 0
# Now we know the structure of the memento we can "parse" it.
view_ids, state = mdata
# Get a list of all views that have dock widgets and mark them.
dock_views = [v for v in self.window.views if self.contains_view(v)]
for v in dock_views:
v._qt4_gone = True
# Create a dock window for all views that had one last time.
for v in self.window.views:
# Make sure this is in a known state.
v.visible = False
for vid in view_ids:
if vid == v.id:
# Create the dock widget if needed and make sure that it is
# invisible so that it matches the state of the visible
# trait. Things will all come right when the main window
# state is restored below.
self._qt4_create_view_dock_widget(v).setVisible(False)
if v in dock_views:
delattr(v, '_qt4_gone')
break
# Remove any remain unused dock widgets.
for v in dock_views:
try:
delattr(v, '_qt4_gone')
except AttributeError:
pass
else:
self._qt4_delete_view_dock_widget(v)
# Restore the state. This will update the view's visible trait through
# the dock window's toggle action.
self.window.control.restoreState(state)
def get_editor_memento(self):
# Get the layout of the editors.
editor_layout = self._qt4_editor_area.saveState()
# Get a memento for each editor that describes its contents.
editor_references = self._get_editor_references()
return (0, (editor_layout, editor_references))
def set_editor_memento(self, memento):
version, mdata = memento
# There has only ever been version 0 so far so check with an assert.
assert version == 0
# Now we know the structure of the memento we can "parse" it.
editor_layout, editor_references = mdata
def resolve_id(id):
# Get the memento for the editor contents (if any).
editor_memento = editor_references.get(id)
if editor_memento is None:
return None
# Create the restored editor.
editor = self.window.editor_manager.set_editor_memento(
editor_memento)
if editor is None:
return None
# Save the editor.
self.window.editors.append(editor)
# Create the control if needed and return it.
return self._qt4_get_editor_control(editor)
self._qt4_editor_area.restoreState(editor_layout, resolve_id)
def get_toolkit_memento(self):
return (0, dict(geometry=str(self.window.control.saveGeometry())))
def set_toolkit_memento(self, memento):
if hasattr(memento, 'toolkit_data'):
data = memento.toolkit_data
if isinstance(data, tuple) and len(data) == 2:
version, datadict = data
if version == 0:
geometry = datadict.pop('geometry', None)
if geometry is not None:
self.window.control.restoreGeometry(geometry)
###########################################################################
# Private interface.
###########################################################################
def _qt4_editor_focus(self, new):
""" Handle an editor getting the focus. """
for editor in self.window.editors:
control = editor.control
editor.has_focus = control is new or \
(control is not None and new in control.children())
def _qt4_editor_title_changed(self, control, title):
""" Handle the title being changed """
for editor in self.window.editors:
if editor.control == control: editor.name = unicode(title)
def _qt4_editor_tab_spinner(self, editor, name, new):
# Do we need to do this verification?
tw, tidx = self._qt4_editor_area._tab_widget(editor.control)
if new: tw.show_button(tidx)
else: tw.hide_button(tidx)
if not new and not editor == self.window.active_editor:
self._qt4_editor_area.setTabTextColor(editor.control, QtCore.Qt.red)
@on_trait_change('window:active_editor')
def _qt4_active_editor_changed(self, old, new):
""" Handle change of active editor """
# Reset tab title to foreground color
self._qt4_editor_area.setTabTextColor(new.control)
def _qt4_view_focus_changed(self, old, new):
""" Handle the change of focus for a view. """
focus_part = None
if new is not None:
# Handle focus changes to views.
for view in self.window.views:
if view.control is not None and view.control.isAncestorOf(new):
view.has_focus = True
focus_part = view
break
if old is not None:
# Handle focus changes from views.
for view in self.window.views:
if view is not focus_part and view.control is not None and view.control.isAncestorOf(old):
view.has_focus = False
break
def _qt4_new_window_request(self, pos, control):
""" Handle a tab tear-out request from the splitter widget. """
editor = self._qt4_remove_editor_with_control(control)
kind = self.window.editor_manager.get_editor_kind(editor)
window = self.window.workbench.create_window()
window.open()
window.add_editor(editor)
window.editor_manager.add_editor(editor, kind)
window.position = (pos.x(), pos.y())
window.size = self.window.size
window.activate_editor(editor)
editor.window = window
def _qt4_tab_close_request(self, control):
""" Handle a tabCloseRequest from the splitter widget. """
for editor in self.window.editors:
if editor.control == control:
editor.close()
break
def _qt4_tab_window_changed(self, control):
""" Handle a tab drag to a different WorkbenchWindow. """
editor = self._qt4_remove_editor_with_control(control)
kind = self.window.editor_manager.get_editor_kind(editor)
while not control.isWindow():
control = control.parent()
for window in self.window.workbench.windows:
if window.control == control:
window.editors.append(editor)
window.editor_manager.add_editor(editor, kind)
window.layout._qt4_get_editor_control(editor)
window.activate_editor(editor)
editor.window = window
break
def _qt4_remove_editor_with_control(self, control):
""" Finds the editor associated with 'control' and removes it. Returns
the editor, or None if no editor was found.
"""
for editor in self.window.editors:
if editor.control == control:
self.editor_closing = editor
control.removeEventFilter(self._qt4_mon)
self.editor_closed = editor
# Make sure that focus events get fired if this editor is
# subsequently added to another window.
editor.has_focus = False
return editor
def _qt4_get_editor_control(self, editor):
""" Create the editor control if it hasn't already been done. """
if editor.control is None:
self.editor_opening = editor
# We must provide a parent (because TraitsUI checks for it when
# deciding what sort of panel to create) but it can't be the editor
# area (because it will be automatically added to the base
# QSplitter).
editor.control = editor.create_control(self.window.control)
editor.control.setObjectName(editor.id)
editor.on_trait_change(self._qt4_editor_tab_spinner, '_loading')
self.editor_opened = editor
def on_name_changed(editor, trait_name, old, new):
self._qt4_editor_area.setWidgetTitle(editor.control, editor.name)
editor.on_trait_change(on_name_changed, 'name')
self._qt4_monitor(editor.control)
return editor.control
def _qt4_add_view(self, view, position, relative_to, size):
""" Add a view. """
# If no specific position is specified then use the view's default
# position.
if position is None:
position = view.position
dw = self._qt4_create_view_dock_widget(view, size)
mw = self.window.control
try:
rel_dw = relative_to._qt4_dock
except AttributeError:
rel_dw = None
if rel_dw is None:
# If we are trying to add a view with a non-existent item, then
# just default to the left of the editor area.
if position == 'with':
position = 'left'
# Position the view relative to the editor area.
try:
dwa = _EDIT_AREA_MAP[position]
except KeyError:
raise ValueError, "unknown view position: %s" % position
mw.addDockWidget(dwa, dw)
elif position == 'with':
# FIXME v3: The Qt documentation says that the second should be
# placed above the first, but it always seems to be underneath (ie.
# hidden) which is not what the user is expecting.
mw.tabifyDockWidget(rel_dw, dw)
else:
try:
orient, swap = _VIEW_AREA_MAP[position]
except KeyError:
raise ValueError, "unknown view position: %s" % position
mw.splitDockWidget(rel_dw, dw, orient)
# The Qt documentation implies that the layout direction can be
# used to position the new dock widget relative to the existing one
# but I could only get the button positions to change. Instead we
# move things around afterwards if required.
if swap:
mw.removeDockWidget(rel_dw)
mw.splitDockWidget(dw, rel_dw, orient)
rel_dw.show()
def _qt4_create_view_dock_widget(self, view, size=(-1, -1)):
    """ Create (or return the already-created) dock widget wrapping a view.

    size is the initial (width, height) forwarded to _ViewContainer;
    -1 components mean "use the view's natural size".
    """
    # See if it has already been created.
    try:
        dw = view._qt4_dock
    except AttributeError:
        dw = QtGui.QDockWidget(view.name, self.window.control)
        dw.setWidget(_ViewContainer(size, self.window.control))
        # Qt uses the object name when saving/restoring window layouts.
        dw.setObjectName(view.id)
        # Track visibility changes coming from both the toggle action
        # (e.g. a menu item) and the dock's own close button.
        dw.connect(dw.toggleViewAction(), QtCore.SIGNAL('toggled(bool)'),
                   self._qt4_handle_dock_visibility)
        dw.connect(dw, QtCore.SIGNAL('visibilityChanged(bool)'),
                   self._qt4_handle_dock_visibility)
        # Save the dock window.
        view._qt4_dock = dw
        # Keep the dock title synchronised with the view's name trait.
        def on_name_changed():
            view._qt4_dock.setWindowTitle(view.name)
        view.on_trait_change(on_name_changed, 'name')
    # Make sure the view control exists.
    if view.control is None:
        # Make sure that the view knows which window it is in.
        view.window = self.window
        try:
            view.control = view.create_control(self.window.control)
        except:
            # Tidy up if the view couldn't be created.
            delattr(view, '_qt4_dock')
            self.window.control.removeDockWidget(dw)
            dw.deleteLater()
            del dw
            raise
        dw.widget().setCentralWidget(view.control)
    return dw
def _qt4_delete_view_dock_widget(self, view):
    """ Delete the dock widget that wraps a view. """
    dock = view._qt4_dock
    # Detach the view's control first so it is not destroyed along with
    # the dock widget.
    control = view.control
    if control is not None:
        control.setParent(None)
    # Forget the dock on the view, then remove and destroy it (and the
    # view container it holds).
    delattr(view, '_qt4_dock')
    main_window = self.window.control
    main_window.removeDockWidget(dock)
    dock.deleteLater()
def _qt4_handle_dock_visibility(self, checked):
    """ Handle the visibility of a dock window changing.

    checked -- the boolean carried by the toggle action's 'toggled'
               signal or the dock's 'visibilityChanged' signal.
    """
    # Find the dock window by its toggle action.
    for v in self.window.views:
        # Views that were never docked have no _qt4_dock attribute.
        try:
            dw = v._qt4_dock
        except AttributeError:
            continue
        # sender() identifies which object emitted the signal currently
        # being handled; match it against this dock's action/children.
        sender = dw.sender()
        if (sender is dw.toggleViewAction() or
            sender in dw.children()):
            # Toggling the action or pressing the close button on
            # the view
            v.visible = checked
def _qt4_monitor(self, control):
    """ Install an event filter for a view or editor control to keep an eye
    on certain events.
    """
    # Lazily create the single shared monitoring object on first use.
    monitor = getattr(self, '_qt4_mon', None)
    if monitor is None:
        monitor = _Monitor(self)
        self._qt4_mon = monitor
    control.installEventFilter(monitor)
class _Monitor(QtCore.QObject):
    """ This class monitors a view or editor control.

    It watches for close events on editor controls so that the layout
    can record the editor being closed and destroy its control.
    """

    def __init__(self, layout):
        # Parent on the window's control so Qt manages the monitor's
        # lifetime along with the window.
        QtCore.QObject.__init__(self, layout.window.control)
        self._layout = layout

    def eventFilter(self, obj, e):
        # Only close events are of interest; find the editor whose
        # control is being closed.
        if isinstance(e, QtGui.QCloseEvent):
            for editor in self._layout.window.editors:
                if editor.control is obj:
                    self._layout.editor_closing = editor
                    editor.destroy_control()
                    self._layout.editor_closed = editor
                    break
        # Never consume the event; we only observe it.
        return False
class _ViewContainer(QtGui.QMainWindow):
    """ This class is a container for a view that allows an initial size
    (specified as a tuple) to be set.
    """

    def __init__(self, size, main_window):
        """ Initialise the object.

        size -- (width, height); each component may be -1 (no initial
                size), > 1 (absolute pixels) or a fraction in (0, 1]
                of the main window's current size.
        main_window -- the window whose size fractional values scale by.
        """
        QtGui.QMainWindow.__init__(self)
        # Save the size and main window.
        self._width, self._height = size
        self._main_window = main_window

    def sizeHint(self):
        """ Reimplemented to return the initial size or the view's current
        size.
        """
        sh = self.centralWidget().sizeHint()
        if self._width > 0:
            if self._width > 1:
                # Absolute width in pixels.
                w = self._width
            else:
                # Fraction of the main window's current width.
                w = self._main_window.width() * self._width
            sh.setWidth(int(w))
        if self._height > 0:
            if self._height > 1:
                # Absolute height in pixels.
                h = self._height
            else:
                # Fraction of the main window's current height.
                h = self._main_window.height() * self._height
            sh.setHeight(int(h))
        return sh

    def showEvent(self, e):
        """ Reimplemented to use the view's current size once shown. """
        # Once shown, stop forcing the initial size.
        self._width = self._height = -1
        QtGui.QMainWindow.showEvent(self, e)
#### EOF ######################################################################
| |
"""
altgraph.Graph - Base Graph class
=================================
..
#--Version 2.1
#--Bob Ippolito October, 2004
#--Version 2.0
#--Istvan Albert June, 2004
#--Version 1.0
#--Nathan Denny, May 27, 1999
"""
from altgraph import GraphError
from collections import deque
class Graph(object):
    """
    The Graph class represents a directed graph with *N* nodes and *E* edges.

    Naming conventions:

    - the prefixes such as *out*, *inc* and *all* will refer to methods
      that operate on the outgoing, incoming or all edges of that node.
      For example: :py:meth:`inc_degree` will refer to the degree of the node
      computed over the incoming edges (the number of neighbours linking to
      the node).

    - the prefixes such as *forw* and *back* will refer to the
      orientation of the edges used in the method with respect to the node.
      For example: :py:meth:`forw_bfs` will start at the node then use the
      outgoing edges to traverse the graph (goes forward).
    """

    def __init__(self, edges=None):
        """
        Initialization.

        :param edges: optional iterable of ``(head, tail)`` or
            ``(head, tail, data)`` tuples used to populate the graph.
        :raises GraphError: if an item has neither 2 nor 3 elements.
        """
        self.next_edge = 0
        self.nodes, self.edges = {}, {}
        self.hidden_edges, self.hidden_nodes = {}, {}
        if edges is not None:
            for item in edges:
                if len(item) == 2:
                    head, tail = item
                    self.add_edge(head, tail)
                elif len(item) == 3:
                    head, tail, data = item
                    self.add_edge(head, tail, data)
                else:
                    raise GraphError("Cannot create edge from %s" % (item,))

    def __repr__(self):
        return '<Graph: %d nodes, %d edges>' % (
            self.number_of_nodes(), self.number_of_edges())

    def add_node(self, node, node_data=None):
        """
        Adds a new node to the graph. Arbitrary data can be attached to the
        node via the node_data parameter. Adding the same node twice will be
        silently ignored.

        The node must be a hashable value.
        """
        #
        # each node maps to a tuple of
        #   index 0 -> list of incoming edge ids
        #   index 1 -> list of outgoing edge ids
        #   index 2 -> the node data
        #
        if node in self.hidden_nodes:
            # Node is present, but hidden
            return
        if node not in self.nodes:
            self.nodes[node] = ([], [], node_data)

    def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):
        """
        Adds a directed edge going from head_id to tail_id.
        Arbitrary data can be attached to the edge via edge_data.
        It may create the nodes if adding edges between nonexisting ones.

        :param head_id: head node
        :param tail_id: tail node
        :param edge_data: (optional) data attached to the edge
        :param create_nodes: (optional) creates the head_id or tail_id
            node in case they did not exist
        :raises GraphError: if a referenced node is missing and
            create_nodes is False.
        """
        # edge ids are monotonically increasing integers
        edge = self.next_edge
        # add nodes if on automatic node creation
        if create_nodes:
            self.add_node(head_id)
            self.add_node(tail_id)
        # update the corresponding incoming and outgoing lists in the nodes
        #   index 0 -> incoming edges
        #   index 1 -> outgoing edges
        try:
            self.nodes[tail_id][0].append(edge)
            self.nodes[head_id][1].append(edge)
        except KeyError:
            raise GraphError('Invalid nodes %s -> %s' % (head_id, tail_id))
        # store edge information
        self.edges[edge] = (head_id, tail_id, edge_data)
        self.next_edge += 1

    def hide_edge(self, edge):
        """
        Hides an edge from the graph. The edge may be unhidden at some later
        time.
        """
        try:
            head_id, tail_id, edge_data = self.hidden_edges[edge] = self.edges[edge]
            self.nodes[tail_id][0].remove(edge)
            self.nodes[head_id][1].remove(edge)
            del self.edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)

    def hide_node(self, node):
        """
        Hides a node from the graph. The incoming and outgoing edges of the
        node will also be hidden. The node may be unhidden at some later time.
        """
        try:
            all_edges = self.all_edges(node)
            self.hidden_nodes[node] = (self.nodes[node], all_edges)
            for edge in all_edges:
                self.hide_edge(edge)
            del self.nodes[node]
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def restore_node(self, node):
        """
        Restores a previously hidden node back into the graph and restores
        all of its incoming and outgoing edges.
        """
        try:
            self.nodes[node], all_edges = self.hidden_nodes[node]
            for edge in all_edges:
                self.restore_edge(edge)
            del self.hidden_nodes[node]
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def restore_edge(self, edge):
        """
        Restores a previously hidden edge back into the graph.
        """
        try:
            head_id, tail_id, data = self.hidden_edges[edge]
            self.nodes[tail_id][0].append(edge)
            self.nodes[head_id][1].append(edge)
            self.edges[edge] = head_id, tail_id, data
            del self.hidden_edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)

    def restore_all_edges(self):
        """
        Restores all hidden edges.
        """
        # iterate over a snapshot; restore_edge mutates hidden_edges
        for edge in list(self.hidden_edges.keys()):
            try:
                self.restore_edge(edge)
            except GraphError:
                # an edge whose endpoint is still hidden cannot be restored
                pass

    def restore_all_nodes(self):
        """
        Restores all hidden nodes.
        """
        for node in list(self.hidden_nodes.keys()):
            self.restore_node(node)

    def __contains__(self, node):
        """
        Test whether a node is in the graph
        """
        return node in self.nodes

    def edge_by_id(self, edge):
        """
        Returns the (head, tail) node pair connected by the given edge id.

        :raises GraphError: if the edge id is unknown.
        """
        # BUGFIX: previously assigned head/tail to None before raising
        # (dead code) and carried a docstring copied from edge_by_node.
        try:
            head, tail, data = self.edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)
        return (head, tail)

    def edge_by_node(self, head, tail):
        """
        Returns the edge that connects the head and tail nodes, or None
        when no such edge exists.
        """
        for edge in self.out_edges(head):
            if self.tail(edge) == tail:
                return edge
        return None

    def number_of_nodes(self):
        """
        Returns the number of visible nodes
        """
        return len(self.nodes)

    def number_of_edges(self):
        """
        Returns the number of visible edges
        """
        return len(self.edges)

    def __iter__(self):
        """
        Iterates over all nodes in the graph
        """
        return iter(self.nodes)

    def node_list(self):
        """
        Return a list of the node ids for all visible nodes in the graph.
        """
        return list(self.nodes.keys())

    def edge_list(self):
        """
        Returns a list of the ids of all visible edges in the graph.
        """
        return list(self.edges.keys())

    def number_of_hidden_edges(self):
        """
        Returns the number of hidden edges
        """
        return len(self.hidden_edges)

    def number_of_hidden_nodes(self):
        """
        Returns the number of hidden nodes
        """
        return len(self.hidden_nodes)

    def hidden_node_list(self):
        """
        Returns the list with the hidden nodes
        """
        return list(self.hidden_nodes.keys())

    def hidden_edge_list(self):
        """
        Returns a list with the hidden edges
        """
        return list(self.hidden_edges.keys())

    def describe_node(self, node):
        """
        return node, node data, outgoing edges, incoming edges for node
        """
        incoming, outgoing, data = self.nodes[node]
        return node, data, outgoing, incoming

    def describe_edge(self, edge):
        """
        return edge, edge data, head, tail for edge
        """
        head, tail, data = self.edges[edge]
        return edge, data, head, tail

    def node_data(self, node):
        """
        Returns the data associated with a node
        """
        return self.nodes[node][2]

    def edge_data(self, edge):
        """
        Returns the data associated with an edge
        """
        return self.edges[edge][2]

    def update_edge_data(self, edge, edge_data):
        """
        Replace the edge data for a specific edge
        """
        self.edges[edge] = self.edges[edge][0:2] + (edge_data,)

    def head(self, edge):
        """
        Returns the node of the head of the edge.
        """
        return self.edges[edge][0]

    def tail(self, edge):
        """
        Returns node of the tail of the edge.
        """
        return self.edges[edge][1]

    def out_nbrs(self, node):
        """
        List of nodes connected by outgoing edges
        """
        return [self.tail(n) for n in self.out_edges(node)]

    def inc_nbrs(self, node):
        """
        List of nodes connected by incoming edges
        """
        return [self.head(n) for n in self.inc_edges(node)]

    def all_nbrs(self, node):
        """
        List of nodes connected by incoming and outgoing edges
        """
        # dict.fromkeys preserves first-seen order while de-duplicating
        l = dict.fromkeys(self.inc_nbrs(node) + self.out_nbrs(node))
        return list(l)

    def out_edges(self, node):
        """
        Returns a list of the outgoing edges
        """
        try:
            return list(self.nodes[node][1])
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def inc_edges(self, node):
        """
        Returns a list of the incoming edges
        """
        try:
            return list(self.nodes[node][0])
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def all_edges(self, node):
        """
        Returns a set of incoming and outgoing edges.
        """
        return set(self.inc_edges(node) + self.out_edges(node))

    def out_degree(self, node):
        """
        Returns the number of outgoing edges
        """
        return len(self.out_edges(node))

    def inc_degree(self, node):
        """
        Returns the number of incoming edges
        """
        return len(self.inc_edges(node))

    def all_degree(self, node):
        """
        The total degree of a node
        """
        return self.inc_degree(node) + self.out_degree(node)

    def _topo_sort(self, forward=True):
        """
        Topological sort.

        Returns a (valid, topo_list) pair; valid is False when the graph
        has cycles, in which case topo_list is incomplete.
        """
        topo_list = []
        queue = deque()
        indeg = {}
        # select the operation that will be performed
        if forward:
            get_edges = self.out_edges
            get_degree = self.inc_degree
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_degree = self.out_degree
            get_next = self.head
        # seed the queue with all zero-degree nodes
        for node in self.node_list():
            degree = get_degree(node)
            if degree:
                indeg[node] = degree
            else:
                queue.append(node)
        # Kahn's algorithm: repeatedly emit nodes whose degree drops to 0
        while queue:
            curr_node = queue.popleft()
            topo_list.append(curr_node)
            for edge in get_edges(curr_node):
                tail_id = get_next(edge)
                if tail_id in indeg:
                    indeg[tail_id] -= 1
                    if indeg[tail_id] == 0:
                        queue.append(tail_id)
        if len(topo_list) == len(self.node_list()):
            valid = True
        else:
            # the graph has cycles, invalid topological sort
            valid = False
        return (valid, topo_list)

    def forw_topo_sort(self):
        """
        Topological sort.

        Returns a list of nodes where the successors (based on outgoing edges)
        of any given node appear in the sequence after that node.
        """
        return self._topo_sort(forward=True)

    def back_topo_sort(self):
        """
        Reverse topological sort.

        Returns a list of nodes where the successors (based on incoming edges)
        of any given node appear in the sequence after that node.
        """
        return self._topo_sort(forward=False)

    def _bfs_subgraph(self, start_id, forward=True):
        """
        Private method creates a subgraph in a bfs order.

        The forward parameter specifies whether it is a forward or backward
        traversal.
        """
        if forward:
            get_bfs = self.forw_bfs
            get_nbrs = self.out_nbrs
        else:
            get_bfs = self.back_bfs
            get_nbrs = self.inc_nbrs
        g = Graph()
        bfs_list = get_bfs(start_id)
        for node in bfs_list:
            g.add_node(node)
        for node in bfs_list:
            for nbr_id in get_nbrs(node):
                g.add_edge(node, nbr_id)
        return g

    def forw_bfs_subgraph(self, start_id):
        """
        Creates and returns a subgraph consisting of the breadth first
        reachable nodes based on their outgoing edges.
        """
        return self._bfs_subgraph(start_id, forward=True)

    def back_bfs_subgraph(self, start_id):
        """
        Creates and returns a subgraph consisting of the breadth first
        reachable nodes based on the incoming edges.
        """
        return self._bfs_subgraph(start_id, forward=False)

    def iterdfs(self, start, end=None, forward=True):
        """
        Collecting nodes in some depth first traversal.

        The forward parameter specifies whether it is a forward or backward
        traversal.
        """
        visited, stack = set([start]), deque([start])
        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head
        while stack:
            curr_node = stack.pop()
            yield curr_node
            if curr_node == end:
                break
            for edge in sorted(get_edges(curr_node)):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    stack.append(tail)

    def iterdata(self, start, end=None, forward=True, condition=None):
        """
        Perform a depth-first walk of the graph (as ``iterdfs``)
        and yield the item data of every node where condition matches. The
        condition callback is only called when node_data is not None.

        Note: when the condition rejects a node, that node's successors
        are not expanded either.
        """
        visited, stack = set([start]), deque([start])
        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head
        get_data = self.node_data
        while stack:
            curr_node = stack.pop()
            curr_data = get_data(curr_node)
            if curr_data is not None:
                if condition is not None and not condition(curr_data):
                    continue
                yield curr_data
            if curr_node == end:
                break
            for edge in get_edges(curr_node):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    stack.append(tail)

    def _iterbfs(self, start, end=None, forward=True):
        """
        Generator of (node, hop) tuples in breadth-first order.

        The forward parameter specifies whether it is a forward or backward
        traversal.
        """
        queue, visited = deque([(start, 0)]), set([start])
        # the direction of the bfs depends on the edges that are sampled
        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head
        while queue:
            curr_node, curr_step = queue.popleft()
            yield (curr_node, curr_step)
            if curr_node == end:
                break
            for edge in get_edges(curr_node):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    queue.append((tail, curr_step + 1))

    def forw_bfs(self, start, end=None):
        """
        Returns a list of nodes in some forward BFS order.

        Starting from the start node the breadth first search proceeds along
        outgoing edges.
        """
        return [node for node, step in self._iterbfs(start, end, forward=True)]

    def back_bfs(self, start, end=None):
        """
        Returns a list of nodes in some backward BFS order.

        Starting from the start node the breadth first search proceeds along
        incoming edges.
        """
        return [node for node, step in self._iterbfs(start, end, forward=False)]

    def forw_dfs(self, start, end=None):
        """
        Returns a list of nodes in some forward DFS order.

        Starting with the start node the depth first search proceeds along
        outgoing edges.
        """
        return list(self.iterdfs(start, end, forward=True))

    def back_dfs(self, start, end=None):
        """
        Returns a list of nodes in some backward DFS order.

        Starting from the start node the depth first search proceeds along
        incoming edges.
        """
        return list(self.iterdfs(start, end, forward=False))

    def connected(self):
        """
        Returns :py:data:`True` if the graph's every node can be reached from
        every other node.
        """
        node_list = self.node_list()
        for node in node_list:
            bfs_list = self.forw_bfs(node)
            if len(bfs_list) != len(node_list):
                return False
        return True

    def clust_coef(self, node):
        """
        Computes and returns the local clustering coefficient of node. The
        local cluster coefficient is proportion of the actual number of edges
        between neighbours of node and the maximum number of edges between
        those neighbours.

        See <http://en.wikipedia.org/wiki/Clustering_coefficient#Local_clustering_coefficient>
        for a formal definition.
        """
        num = 0
        nbr_set = set(self.out_nbrs(node))
        if node in nbr_set:
            nbr_set.remove(node)  # loop defense
        for nbr in nbr_set:
            sec_set = set(self.out_nbrs(nbr))
            if nbr in sec_set:
                sec_set.remove(nbr)  # loop defense
            num += len(nbr_set & sec_set)
        nbr_num = len(nbr_set)
        # BUGFIX: require at least two neighbours; with exactly one the
        # denominator nbr_num * (nbr_num - 1) is zero and the previous
        # "if nbr_num:" guard raised ZeroDivisionError.
        if nbr_num > 1:
            clust_coef = float(num) / (nbr_num * (nbr_num - 1))
        else:
            clust_coef = 0.0
        return clust_coef

    def get_hops(self, start, end=None, forward=True):
        """
        Computes the hop distance to all nodes centered around a
        specified node.

        First order neighbours are at hop 1, their neigbours are at hop 2 etc.
        Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value
        of the forward parameter. If the distance between all neighbouring
        nodes is 1 the hop number corresponds to the shortest distance between
        the nodes.

        :param start: the starting node
        :param end: ending node (optional). When not specified will search
            the whole graph.
        :param forward: directionality parameter (optional). If True
            (default) it uses forw_bfs otherwise back_bfs.
        :return: returns a list of tuples where each tuple contains the
            node and the hop.

        Typical usage::

            >>> print (graph.get_hops(1, 8))
            >>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
            # node 1 is at 0 hops
            # node 2 is at 1 hop
            # ...
            # node 8 is at 5 hops
        """
        if forward:
            return list(self._iterbfs(start=start, end=end, forward=True))
        else:
            return list(self._iterbfs(start=start, end=end, forward=False))
| |
#!/usr/bin/env python
import time
import numpy as np
from collections import namedtuple
from motif import findMotifOfLengthFast, nonOverlappingMinima
from motif import entropy
from ..utils import evaluate
from ..utils.arrays import isScalar
from ..utils.sequence import isListOrTuple
from ..utils.subseq import distsToRows
# ================================================================
# Data Structures
# ================================================================
# Result record for the outcast search functions below:
#   score   -- quality score of the discovered instance set (higher is better)
#   idxs    -- start indices of the pattern instances
#   length  -- subsequence length of the pattern
#   fromSeq -- sequence-membership info passed through from the caller
#              (may be None)
_infoFields = [
    'score',
    'idxs',
    'length',
    'fromSeq',
]
OutcastInfo = namedtuple('OutcastInfo', _infoFields)
# ================================================================
# Functions
# ================================================================
# ------------------------------------------------ utility funcs
def computeAllSeedIdxsFromPair(idx1, idx2, stepLen, numShifts=0):
    """Return candidate seed indices derived from a motif pair.

    Starting from idx1 and idx2, additional indices shifted left and right
    by successive multiples of stepLen are appended.  When numShifts is not
    given (< 1) and stepLen is positive, int(1 / stepLen) shifts are used.
    """
    if stepLen > 0. and numShifts < 1:
        numShifts = int(1. / stepLen)
    seedIdxs = [idx1, idx2]
    # expand around each of the two original indices in turn
    for base in (idx1, idx2):
        left = right = base
        for _ in range(numShifts):
            left -= stepLen
            right += stepLen
            seedIdxs.append(left)
            seedIdxs.append(right)
    return seedIdxs
def startsAndEndsWithinBounds(startIdxs, subseqLen, seqLen):
    """Sort candidate start indices and drop any whose subsequence of
    length subseqLen would fall outside the sequence.

    Returns (startIdxs, endIdxs) as numpy arrays with
    endIdxs = startIdxs + subseqLen.
    """
    starts = np.sort(np.asarray(startIdxs))
    # keep starts in [0, seqLen - subseqLen)
    inBounds = (starts >= 0) & (starts < seqLen - subseqLen)
    starts = starts[inBounds]
    return starts, starts + subseqLen
# ------------------------------------------------ search funcs
def findAllOutcastInstances(seqs, lengths):
    """Find the best outcast pattern in seqs and wrap each of its
    occurrences in a pattern-instance object.

    # NOTE(review): assumes findOutcast() returned a result; if no
    # candidate was found (None) the attribute access below crashes.
    """
    outcast = findOutcast(seqs, lengths)
    instances = []
    for idx in outcast.idxs:
        print "creating instance {}-{}".format(idx, idx + outcast.length)
        inst = evaluate.createPatternInstance(idx, idx + outcast.length)
        instances.append(inst)
    return instances
def findOutcast(seqs, lengths):
    """Search every candidate length and return the highest-scoring
    OutcastInfo, or None if nothing scored above -inf.

    seqs and lengths may each be a single value or a list.
    """
    # NOTE(review): time.clock() was removed in Python 3.8; this module
    # is Python 2 code (see the print statements elsewhere in the file).
    tstart = time.clock()
    if isScalar(lengths):
        lengths = [lengths]
    if not isListOrTuple(seqs):
        seqs = [seqs]
    bestScore = -np.inf
    bestOutcast = None
    # keep the best-scoring result across all lengths
    for m in lengths:
        info = findOutcastOfLength(seqs, m)
        if info and info.score > bestScore:
            bestScore = info.score
            bestOutcast = info
    print("Found best outcast in {}s".format(time.clock() - tstart))
    return bestOutcast
def findOutcastOfLength(seqs, length, shiftStep=.1, norm='each', mdl=False):
    """Find the best set of outcast instances of a fixed length.

    Seeds the search from the best motif pair of this length, generates
    shifted seed locations around that pair, and returns the seed whose
    instance set scores best.  mdl selects the MDL-based scorer.
    """
    # if numShifts < 0:
    #     numShifts = 1
    # stepLen = shiftStep * length
    motif = findMotifOfLengthFast(seqs[:], length, norm=norm)
    Xnorm = motif.Xnorm
    seedIdxs = computeAllSeedIdxsFromPair(motif.idx1, motif.idx2, shiftStep)
    # XXX this assumes only one seq
    seedIdxs, _ = startsAndEndsWithinBounds(seedIdxs, length, len(seqs[0]))
    bestScore = -np.inf
    bestOutcast = None
    for idx in seedIdxs:
        seed = Xnorm[idx]
        if mdl:
            info = findOutcastInstancesMDL(Xnorm, seed, length)
        else:
            info = findOutcastInstances(Xnorm, seed, length)
        if info and info.score > bestScore:
            bestScore = info.score
            bestOutcast = info
    # NOTE(review): the format string has two placeholders but three
    # arguments (the third is ignored), and this crashes if bestOutcast
    # is still None -- confirm with callers.
    print "bestOutcast idxs at length {}: {}".format(length, bestOutcast.idxs,
        bestOutcast.length)
    return bestOutcast
def findOutcastInstances(Xnorm, seed, length, maxOverlapFraction=.1, fromSeq=None):
    """Greedily grow the set of subsequences nearest to seed and score it.

    Candidates are non-overlapping local minima of the distances from the
    rows of Xnorm to seed, taken in increasing order of distance.  For each
    prefix of candidates, the "gap" between the pattern-model cost and the
    cheaper of a random-walk model and the nearest enemy (the next-closest
    excluded candidate) is computed; the prefix with the largest gap wins.

    Returns an OutcastInfo(score, idxs, length, fromSeq).
    """
    minSpacing = max(int((1. - maxOverlapFraction) * length), 1)
    dists = distsToRows(Xnorm, seed)
    minimaIdxs = nonOverlappingMinima(dists, minSpacing, fromSeq=fromSeq)
    minimaDists = dists[minimaIdxs]
    # sort indices of relative minima in increasing order of distance
    sortIdxs = np.argsort(minimaDists)
    idxs = minimaIdxs[sortIdxs]
    dists = minimaDists[sortIdxs]
    # BUGFIX: copy the seed; binding it directly aliased the underlying
    # row of Xnorm, so the in-place += below corrupted the data matrix.
    centroidSums = np.copy(seed)
    centroid = np.copy(seed)
    distSum_pattern = 0
    vectLen = len(seed)
    bestGap = -np.inf
    bestIdxs = None
    for i, idx in enumerate(idxs[1:]):
        # BUGFIX: k must be an int; a float here breaks the numpy
        # indexing below (idxs[k], idxs[:k]).
        k = i + 2  # number of instances if this candidate is included
        # pattern model: cost of coding this instance against the centroid
        x = Xnorm[idx]
        diff = centroid - x
        distSum_pattern += np.dot(diff, diff) / vectLen
        centroidSums += x
        centroid = centroidSums / float(k)
        # random walk model: expected total distance for k instances
        AVG_DIST_TO_RAND_WALK = 1.
        # AVG_DIST_TO_RAND_WALK = .5
        distSum_walk = AVG_DIST_TO_RAND_WALK * k
        # nearest enemy: cost relative to the next-closest excluded candidate
        distSum_enemy = np.inf
        if k < len(idxs):
            nextIdx = idxs[k]
            nextX = Xnorm[nextIdx]
            diff_enemy = centroid - nextX
            distSum_enemy = np.dot(diff_enemy, diff_enemy) / vectLen * k
        rivalSum = min(distSum_walk, distSum_enemy)
        gap = rivalSum - distSum_pattern
        if gap > bestGap:
            bestGap = gap
            bestIdxs = idxs[:k]
    return OutcastInfo(score=bestGap, idxs=bestIdxs, length=length, fromSeq=fromSeq)
def findOutcastInstancesMDL(Xnorm, seed, length, maxOverlapFraction=.1,
        fromSeq=None, mdlBits=6, useEnemy=True):
    """MDL (minimum description length) scorer for outcast instances.

    Quantizes the subsequences to 2**mdlBits levels and greedily adds the
    candidates closest to seed, keeping each addition only if it improves
    the total bits saved by coding the instances against their centroid
    versus a rival model (noise, or the nearest excluded candidate).

    NOTE: useEnemy is currently unused; the enemy model is always
    considered.  Kept for interface compatibility.

    Returns an OutcastInfo(score, idxs, length, fromSeq).
    """
    minSpacing = max(int((1. - maxOverlapFraction) * length), 1)
    dists = distsToRows(Xnorm, seed)
    minimaIdxs = nonOverlappingMinima(dists, minSpacing, fromSeq=fromSeq)
    minimaDists = dists[minimaIdxs]
    # sort indices of relative minima in increasing order of distance
    sortIdxs = np.argsort(minimaDists)
    idxs = minimaIdxs[sortIdxs]
    dists = minimaDists[sortIdxs]
    # quantize each (row-normalized) subsequence to 2**mdlBits levels
    numLevels = int(2**mdlBits)
    mins = np.min(Xnorm, axis=1).reshape((-1, 1))
    maxs = np.max(Xnorm, axis=1).reshape((-1, 1))
    ranges = (maxs - mins)
    Xquant = (Xnorm - mins) / ranges * (numLevels - 1)  # 8 bits -> {0..255}
    # BUGFIX: np.int is a removed alias of the builtin int in modern numpy
    Xquant = Xquant.astype(int)
    # initialize MDL stats with the single closest instance
    row = Xquant[idxs[0]]
    centroidSums = np.copy(row)
    hypothesisEnt = entropy(row)
    origEnt = hypothesisEnt
    bitsave = -np.inf  # ensure 2nd subseq gets added
    instanceIdxs = [idxs[0]]
    for i, idx in enumerate(idxs[1:]):
        # BUGFIX: k must be an int; a float breaks the numpy indexing below
        k = i + 2  # number of instances if this candidate is included
        subseq = Xquant[idx]
        # compute original entropy of this instance along with current ones
        newOrigEnt = origEnt + entropy(subseq)
        # compute centroid when this instance is added
        newCentroidSums = centroidSums + subseq
        newCentroid = (newCentroidSums / float(k)).astype(int)
        # compute coded entropy when this instance is added
        newInstanceIdxs = instanceIdxs[:]
        newInstanceIdxs.append(idx)
        # diffs = Xquant[instanceIdxs] - newCentroid # works better, but nonsensical
        diffs = Xquant[newInstanceIdxs] - newCentroid
        newCodedEnt = np.sum(entropy(diffs, axis=1))
        # compute total bitsave if this instance is added
        newCodingSave = newOrigEnt - newCodedEnt
        newHypothesisEnt = entropy(newCentroid)
        newBitsave = newCodingSave - newHypothesisEnt
        # divide by 2 as heuristic to reduce entropy, since description length
        # doesn't correspond to any obvious probabilistic model
        # noiseDiffs = Xquant[newInstanceIdxs] // 2
        # noiseCodedEnt = np.sum(entropy(noiseDiffs, axis=1))
        noiseCodedEnt = newCodedEnt / 2
        # nearest enemy: cost of coding the instances against the next
        # closest excluded candidate
        enemyCodedEnt = -np.inf
        if k < len(idxs):
            nextIdx = idxs[k]
            enemySubseq = Xquant[nextIdx]
            enemyDiffs = Xquant[newInstanceIdxs] - enemySubseq
            enemyCodedEnt = np.sum(entropy(enemyDiffs, axis=1))
        rivalEnt = min(noiseCodedEnt, enemyCodedEnt)
        newBitsave += rivalEnt
        # accept the candidate only if it improves the bits saved
        if newBitsave > bitsave:
            bitsave = newBitsave
            origEnt = newOrigEnt
            centroidSums = newCentroidSums
            instanceIdxs = newInstanceIdxs
        # else:
        #     break
    bestIdxs = sorted(instanceIdxs)
    return OutcastInfo(score=bitsave, idxs=bestIdxs, length=length, fromSeq=fromSeq)
def old_findOutcastInstances(Xnorm, seed, length, maxOverlapFraction=.1, fromSeq=None):
    """Deprecated predecessor of findOutcastInstances; kept for reference.

    Greedily grows the instance set in order of distance to seed and
    scores each prefix as (nextDist / dist) * log(len(idxs)).
    """
    minSpacing = max(int((1. - maxOverlapFraction) * length), 1)
    dists = distsToRows(Xnorm, seed)
    minimaIdxs = nonOverlappingMinima(dists, minSpacing, fromSeq=fromSeq)
    # invertMinimaIdxs = np.arange(len(dists))[minimaIdxs]
    # print "dists shape: ", dists.shape
    # print "found minimaIdxs: ", minimaIdxs
    minimaDists = dists[minimaIdxs]
    # sort indices of relative minima in increasing order of distance
    # TODO use a min heap, since that's O(n) and this is O(nlgn)
    sortIdxs = np.argsort(minimaDists)
    # unsortIdxs = np.arange(len(minimaDists))[sortIdxs]
    minimaIdxs = minimaIdxs[sortIdxs]
    minimaDists = minimaDists[sortIdxs]
    # initialize with best pair so we don't return anomalies
    idxs = [minimaIdxs[0], minimaIdxs[1]]
    # totalDist = 2 * minimaDists[1] # don't count self distance, since 0
    # maxDist = minimaDists[1]
    dist = minimaDists[1]
    nextIdx, nextDist = minimaIdxs[2], minimaDists[2]
    # bestScore = nextDist * len(idxs) - totalDist
    # bestScore = (nextDist - dist) * len(idxs)
    # bestScore = (nextDist - dist) * np.log(len(idxs))
    bestScore = (nextDist / dist) * np.log(len(idxs))
    bestIdxs = idxs[:]
    # NOTE(review): mutates numpy's *global* print options as a side effect
    np.set_printoptions(precision=0)
    # print "minimaDists:", minimaDists
    print "minima diffs:", np.r_[0, minimaDists[1:] - minimaDists[:-1]]
    for i in range(2, len(minimaIdxs) - 1):
        idx, dist = nextIdx, nextDist
        nextIdx, nextDist = minimaIdxs[i+1], minimaDists[i+1]
        idxs.append(idx)
        # totalDist += dist
        # score = nextDist * len(idxs) - totalDist
        # score = (nextDist - dist) * len(idxs)
        # score = (nextDist - dist) * np.log(len(idxs))
        score = (nextDist / dist) * np.log(len(idxs))
        if score > bestScore:
            # print "new best score {} for idxs {}".format(score, idxs)
            bestScore = score
            bestIdxs = idxs[:]
        # else:
        #     break
    bestIdxs = sorted(bestIdxs)
    return OutcastInfo(score=bestScore, idxs=bestIdxs, length=length, fromSeq=fromSeq)
# ================================================================ Main
def randWalkSeq(length):
    """Return a z-normalized random walk of the given length."""
    walk = np.cumsum(np.random.randn(length))  # yields a weird plateau thing
    # walk = np.random.randn(length)  # yields a normal distro
    return (walk - walk.mean()) / walk.std()
    # dividing by std(ddof=1) instead would use the n-1 normalization
def randWalkDists(numPairs=100000, seqLen=32):
    """Sample squared L2 distances (normalized by sequence length)
    between pairs of independent z-normalized random walks.
    """
    dists = np.empty(numPairs)
    for pair in range(numPairs):
        first = randWalkSeq(seqLen)
        second = randWalkSeq(seqLen)
        delta = first - second
        dists[pair] = np.dot(delta, delta)
    return dists / seqLen
if __name__ == '__main__':
    # Empirically examine the distribution of distances between pairs of
    # z-normalized random walks (used to justify AVG_DIST_TO_RAND_WALK above).
    dists = randWalkDists()
    import matplotlib.pyplot as plt
    nBins = 50
    # plt.hist(dists, nBins, normed=True)
    # dists = np.sort(dists)
    # hist, edges = np.histogram(dists, nBins, normed=True)
    # cdf = np.cumsum(hist)
    # cdf /= np.max(cdf)
    # plt.plot(cdf) # basically a perfectly straight line
    # plt.yscale('log')
    plt.show()
    # ^ interesting; .5 and 3.5 are -3dB points, and basically flat between
    # those; full range is of course 0 to 4.
    # -this roughly corresponds to a sigmoidal CDF; basically flat near
    # the edges, and linear regime (constant derivative) in the middle
    # -but why would this yield a sigmoidal CDF?
    print "mu, sigma = {}, {}".format(np.mean(dists), np.std(dists))
    # prints ~2.007, ~.98, which suggests this is really 2 and 1
    # -and note that we're znorming the rand walks and returning squared
    # L2 dist over length
    # -interesting that it always returns sigma of ~.98, never 1.
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import with_statement
from acq4.devices.Camera import Camera, CameraTask
from acq4.util import Qt
import six
from six.moves import range
import time, sys, traceback
import acq4.util.ptime as ptime
from acq4.util.Mutex import Mutex
from acq4.util.debug import *
import acq4.util.functions as fn
import numpy as np
import scipy
from collections import OrderedDict
import acq4.pyqtgraph as pg
class MockCamera(Camera):
def __init__(self, manager, config, name):
    """Create the mock camera device.

    config may contain an 'images' mapping of objective name ->
    image-stack filename used as the simulated background; otherwise a
    mandelbrot image is generated.
    """
    self.camLock = Mutex(Mutex.Recursive)  ## Lock to protect access to camera
    self.ringSize = 100
    self.frameId = 0
    self.noise = np.random.normal(size=10000000, loc=100, scale=10)  ## pre-generate noise for use in images
    if 'images' in config:
        # Load one background image stack per objective from disk.
        self.bgData = {}
        self.bgInfo = {}
        for obj, filename in config['images'].items():
            file = manager.fileHandle(filename)
            ma = file.read()
            self.bgData[obj] = ma.asarray()
            self.bgInfo[obj] = file.info().deepcopy()
            # depth (z) values along the stack's first axis
            self.bgInfo[obj]['depths'] = ma.xvals(0)
    else:
        # No image stacks configured; synthesize a single background.
        # NOTE(review): mandelbrot() is defined elsewhere in this module.
        self.bgData = mandelbrot(w=4000, maxIter=60).astype(np.float32)
        self.bgInfo = None
    self.background = None
    # Current camera parameter values.
    self.params = OrderedDict([
        ('triggerMode', 'Normal'),
        ('exposure', 0.001),
        #('binning', (1,1)),
        #('region', (0, 0, 512, 512)),
        ('binningX', 1),
        ('binningY', 1),
        ('regionX', 0),
        ('regionY', 0),
        ('regionW', 512),
        ('regionH', 512),
        ('gain', 1.0),
        ('sensorSize', (512, 512)),
        ('bitDepth', 16),
    ])
    # For each parameter: (allowed values/range, writable, readable,
    # list of dependent parameters).
    self.paramRanges = OrderedDict([
        ('triggerMode', (['Normal', 'TriggerStart'], True, True, [])),
        ('exposure', ((0.001, 10.), True, True, [])),
        #('binning', ([range(1,10), range(1,10)], True, True, [])),
        #('region', ([(0, 511), (0, 511), (1, 512), (1, 512)], True, True, [])),
        ('binningX', (list(range(1,10)), True, True, [])),
        ('binningY', (list(range(1,10)), True, True, [])),
        ('regionX', ((0, 511), True, True, ['regionW'])),
        ('regionY', ((0, 511), True, True, ['regionH'])),
        ('regionW', ((1, 512), True, True, ['regionX'])),
        ('regionH', ((1, 512), True, True, ['regionY'])),
        ('gain', ((0.1, 10.0), True, True, [])),
        ('sensorSize', (None, False, True, [])),
        ('bitDepth', (None, False, True, [])),
    ])
    # Compound parameters exposed as tuples of the scalar ones above.
    self.groupParams = {
        'binning': ('binningX', 'binningY'),
        'region': ('regionX', 'regionY', 'regionW', 'regionH')
    }
    # Smooth random "signal" image with a bright square at [20:40, 20:40].
    sig = np.random.normal(size=(512, 512), loc=1.0, scale=0.3)
    sig = scipy.ndimage.gaussian_filter(sig, (3, 3))
    sig[20:40, 20:40] += 1
    sig[sig<0] = 0
    self.signal = sig
    Camera.__init__(self, manager, config, name)  ## superclass will call setupCamera when it is ready.
    # Acquisition state.
    self.acqBuffer = None
    self.frameId = 0
    self.lastIndex = None
    self.lastFrameTime = None
    self.stopOk = False
    # Invalidate the cached background whenever the global transform moves.
    self.sigGlobalTransformChanged.connect(self.globalTransformChanged)
    ## generate list of mock cells
    cells = np.zeros(20, dtype=[('x', float), ('y', float), ('size', float), ('value', float), ('rate', float), ('intensity', float), ('decayTau', float)])
    cells['x'] = np.random.normal(size=cells.shape, scale=100e-6, loc=-1.5e-3)
    cells['y'] = np.random.normal(size=cells.shape, scale=100e-6, loc=4.4e-3)
    cells['size'] = np.random.normal(size=cells.shape, scale=2e-6, loc=10e-6)
    cells['rate'] = np.random.lognormal(size=cells.shape, mean=0, sigma=1) * 1.0
    cells['intensity'] = np.random.uniform(size=cells.shape, low=1000, high=10000)
    cells['decayTau'] = np.random.uniform(size=cells.shape, low=15e-3, high=500e-3)
    self.cells = cells
def setupCamera(self):
    # Real camera drivers open their hardware connection here; the mock
    # camera has nothing to set up.
    pass
def globalTransformChanged(self):
self.background = None
def startCamera(self):
self.cameraStarted = True
self.lastFrameTime = ptime.time()
def stopCamera(self):
self.cameraStopped = True
def getNoise(self, shape):
n = shape[0] * shape[1]
s = np.random.randint(len(self.noise)-n)
d = self.noise[s:s+n]
d.shape = shape
return np.abs(d)
    def getBackground(self):
        """Return the simulated background image for the current sensor size
        and device transform, rebuilding the cached copy when invalidated."""
        if self.background is None:
            w,h = self.params['sensorSize']
            tr = self.globalTransform()
            if isinstance(self.bgData, dict):
                # select data based on objective
                obj = self.getObjective()
                data = self.bgData[obj]
                info = self.bgInfo[obj]
                px = info['pixelSize']
                pz = info['depths'][1] - info['depths'][0]
                # Build a transform mapping global coordinates into the
                # background stack's (x, y, depth) voxel coordinates.
                m = Qt.QMatrix4x4()
                pos = info['transform']['pos']
                m.scale(1/px[0], 1/px[1], 1/pz)
                m.translate(-pos[0], -pos[1], -info['depths'][0])
                tr2 = m * tr
                origin = tr2.map(pg.Vector(0, 0, 0))
                #print(origin)
                origin = [int(origin.x()), int(origin.y()), origin.z()]
                ## slice data
                # Intersect the camera's view rectangle with the stack extents;
                # areas outside the stack are filled with a constant below.
                camRect = Qt.QRect(origin[0], origin[1], w, h)
                dataRect = Qt.QRect(0, 0, data.shape[1], data.shape[2])
                overlap = camRect.intersected(dataRect)
                tl = overlap.topLeft() - camRect.topLeft()
                # Linearly interpolate between the two stack planes bracketing
                # the (possibly fractional) focal depth z.
                # NOTE(review): if z is exactly integral, z2-z1 == 0 and s
                # becomes nan -- presumably z is never integral here; confirm.
                z = origin[2]
                z1 = np.floor(z)
                z2 = np.ceil(z)
                s = (z-z1) / (z2-z1)
                z1 = int(np.clip(z1, 0, data.shape[0]-1))
                z2 = int(np.clip(z2, 0, data.shape[0]-1))
                src1 = data[z1, overlap.left():overlap.left()+overlap.width(), overlap.top():overlap.top()+overlap.height()]
                src2 = data[z2, overlap.left():overlap.left()+overlap.width(), overlap.top():overlap.top()+overlap.height()]
                src = src1 * (1-s) + src2 * s
                # Constant fill (100) outside the stack, sliced data inside.
                bg = np.empty((w, h), dtype=data.dtype)
                bg[:] = 100
                bg[tl.x():tl.x()+overlap.width(), tl.y():tl.y()+overlap.height()] = src
                self.background = bg
                #vectors = ([1, 0, 0], [0, 1, 0])
                #self.background = pg.affineSlice(data, (w,h), origin, vectors, (1, 2, 0), order=1)
            else:
                # Single 2D background: slice it along the camera's x/y axes.
                tr = pg.SRTTransform(tr)
                m = Qt.QTransform()
                m.scale(3e6, 3e6)
                m.translate(0.0005, 0.0005)
                tr = tr * m
                origin = tr.map(pg.Point(0,0))
                x = (tr.map(pg.Point(1,0)) - origin)
                y = (tr.map(pg.Point(0,1)) - origin)
                origin = np.array([origin.x(), origin.y()])
                x = np.array([x.x(), x.y()])
                y = np.array([y.x(), y.y()])
                ## slice fractal from pre-rendered data
                vectors = (x,y)
                self.background = pg.affineSlice(self.bgData, (w,h), origin, vectors, (0,1), order=1)
        return self.background
def pixelVectors(self):
tr = self.globalTransform()
origin = tr.map(pg.Point(0,0))
x = (tr.map(pg.Point(1,0)) - origin)
y = (tr.map(pg.Point(0,1)) - origin)
origin = np.array([origin.x(), origin.y()])
x = np.array([x.x(), x.y()])
y = np.array([y.x(), y.y()])
return x,y
def newFrames(self):
"""Return a list of all frames acquired since the last call to newFrames."""
prof = pg.debug.Profiler(disabled=True)
now = ptime.time()
dt = now - self.lastFrameTime
exp = self.getParam('exposure')
bin = self.getParam('binning')
fps = 1.0 / (exp+(40e-3/(bin[0]*bin[1])))
nf = int(dt * fps)
if nf == 0:
return []
self.lastFrameTime = now + exp
prof()
region = self.getParam('region')
prof()
bg = self.getBackground()[region[0]:region[0]+region[2], region[1]:region[1]+region[3]]
prof()
# Start with noise
shape = region[2:]
data = self.getNoise(shape)
#data = np.zeros(shape, dtype=float)
prof()
# Add specimen
data += bg * (exp * 10)
prof()
## update cells
spikes = np.random.poisson(min(dt, 0.4) * self.cells['rate'])
self.cells['value'] *= np.exp(-dt / self.cells['decayTau'])
self.cells['value'] = np.clip(self.cells['value'] + spikes * 0.2, 0, 1)
data[data<0] = 0
# draw cells
px = (self.pixelVectors()[0]**2).sum() ** 0.5
# Generate transform that maps grom global coordinates to image coordinates
cameraTr = pg.SRTTransform3D(self.inverseGlobalTransform())
# note we use binning=(1,1) here because the image is downsampled later.
frameTr = self.makeFrameTransform(region, [1, 1]).inverted()[0]
tr = pg.SRTTransform(frameTr * cameraTr)
for cell in self.cells:
w = cell['size'] / px
pos = pg.Point(cell['x'], cell['y'])
imgPos = tr.map(pos)
start = (int(imgPos.x()), int(imgPos.y()))
stop = (int(start[0]+w), int(start[1]+w))
val = cell['intensity'] * cell['value'] * self.getParam('exposure')
data[max(0,start[0]):max(0,stop[0]), max(0,start[1]):max(0,stop[1])] += val
# Binning
if bin[0] > 1:
data = fn.downsample(data, bin[0], axis=0)
if bin[1] > 1:
data = fn.downsample(data, bin[1], axis=1)
data = data.astype(np.uint16)
prof()
self.frameId += 1
frames = []
for i in range(nf):
frames.append({'data': data, 'time': now + (i / fps), 'id': self.frameId})
prof()
return frames
    def quit(self):
        # No hardware resources to release.
        pass
def listParams(self, params=None):
"""List properties of specified parameters, or of all parameters if None"""
if params is None:
return self.paramRanges
else:
if isinstance(params, six.string_types):
return self.paramRanges[params]
out = OrderedDict()
for k in params:
out[k] = self.paramRanges[k]
return out
def setParams(self, params, autoRestart=True, autoCorrect=True):
dp = []
ap = {}
for k in params:
if k in self.groupParams:
ap.update(dict(zip(self.groupParams[k], params[k])))
dp.append(k)
params.update(ap)
for k in dp:
del params[k]
self.params.update(params)
newVals = params
restart = True
if autoRestart and restart:
self.restart()
self.sigParamsChanged.emit(newVals)
return (newVals, restart)
def getParams(self, params=None):
if params is None:
params = list(self.listParams().keys())
vals = OrderedDict()
for k in params:
if k in self.groupParams:
vals[k] = list(self.getParams(self.groupParams[k]).values())
else:
vals[k] = self.params[k]
return vals
    def setParam(self, param, value, autoRestart=True, autoCorrect=True):
        # Convenience wrapper around setParams for a single parameter.
        return self.setParams({param: value}, autoRestart=autoRestart, autoCorrect=autoCorrect)
    def getParam(self, param):
        # Convenience wrapper around getParams for a single parameter.
        return self.getParams([param])[param]
    def createTask(self, cmd, parentTask):
        # Lock guards concurrent task creation.
        with self.lock:
            return MockCameraTask(self, cmd, parentTask)
class MockCameraTask(CameraTask):
    """Generate exposure waveform when recording with mockcamera.
    """
    def __init__(self, dev, cmd, parentTask):
        CameraTask.__init__(self, dev, cmd, parentTask)
        # Register makeExpWave so the mock DAQ synthesizes the exposure
        # channel instead of reading from real hardware.
        self._DAQCmd['exposure']['lowLevelConf'] = {'mockFunc': self.makeExpWave}
        self.frameTimes = []
    def makeExpWave(self):
        ## Called by DAQGeneric to simulate a read-from-DAQ
        # first look up the DAQ configuration so we know the sample rate / number
        daq = self.dev.listChannels()['exposure']['device']
        cmd = self.parentTask().tasks[daq].cmd
        start = self.parentTask().startTime
        sampleRate = cmd['rate']
        # Exposure trace: 1 while the shutter is open for each acquired
        # frame, 0 otherwise.
        data = np.zeros(cmd['numPts'], dtype=np.uint8)
        for f in self.frames:
            t = f.info()['time']
            exp = f.info()['exposure']
            i0 = int((t - start) * sampleRate)
            # Ends 0.1 ms before nominal exposure end -- presumably to keep
            # adjacent frames' pulses distinct; TODO confirm.
            i1 = i0 + int((exp-0.1e-3) * sampleRate)
            data[i0:i1] = 1
        return data
def mandelbrot(w=500, h=None, maxIter=20, xRange=(-2.0, 1.0), yRange=(-1.2, 1.2)):
    """Render a Mandelbrot escape-time image.

    Returns a (w, h) int array; each entry holds the last iteration index
    (mod maxIter-1) at which the point was still bounded. Points rejected
    up front (main cardioid / period-2 bulb) remain 0.
    """
    xmin, xmax = xRange
    ymin, ymax = yRange
    if h is None:
        # Preserve the aspect ratio of the requested window.
        h = int(w * (ymax - ymin) / (xmax - xmin))
    xs = np.linspace(xmin, xmax, w).reshape(w, 1)
    ys = np.linspace(ymin, ymax, h).reshape(1, h)
    ## speed up with a clever initial mask:
    # Points inside the main cardioid or period-2 bulb never escape, so
    # exclude them (and anything outside the bounding box) immediately.
    xq = xs - 0.25
    ysq = ys ** 2
    q = xq ** 2 + ysq
    active = q * (q + xq) > 0.25 * ysq
    active &= (xs + 1) ** 2 + ysq > 1/16.
    active &= xs > -2
    active &= xs < 0.7
    active &= ys > -1.2
    active &= ys < 1.2
    img = np.zeros((w, h), dtype=int)
    rowInd, colInd = np.mgrid[0:w, 0:h]
    cr = xs.reshape(w)[rowInd]
    ci = ys.reshape(h)[colInd]
    c = np.empty((w, h), dtype=np.complex64)
    c.real = cr
    c.imag = ci
    z = c.copy()
    for step in range(maxIter):
        # Drop points that have already escaped; arrays shrink each pass.
        z = z[active]
        c = c[active]
        rowInd = rowInd[active]
        colInd = colInd[active]
        z = z * z + c
        active = np.abs(z) < 2.
        img[rowInd[active], colInd[active]] = step % (maxIter - 1)
    return img
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from itertools import izip
from .basecase import (BaseTestCase, cqlshlog, dedent, at_a_time, cqlsh,
TEST_HOST, TEST_PORT)
from .cassconnect import (get_test_keyspace, testrun_cqlsh, testcall_cqlsh,
cassandra_cursor, split_cql_commands, quote_name)
from .ansi_colors import (ColoredText, lookup_colorcode, lookup_colorname,
lookup_colorletter, ansi_seq)
CONTROL_C = '\x03'
CONTROL_D = '\x04'
class TestCqlshOutput(BaseTestCase):
    def setUp(self):
        # No per-test fixtures; shared test data is created elsewhere.
        pass
    def tearDown(self):
        pass
def assertNoHasColors(self, text, msg=None):
self.assertNotRegexpMatches(text, ansi_seq, msg='ANSI CSI sequence found in %r' % text)
    def assertHasColors(self, text, msg=None):
        # Passes when at least one ANSI CSI escape sequence is present.
        self.assertRegexpMatches(text, ansi_seq, msg=msg)
    def assertColored(self, coloredtext, colorname):
        # Every non-whitespace character must carry the color *colorname*.
        wanted_colorcode = lookup_colorcode(colorname)
        for num, c in enumerate(coloredtext):
            if not c.isspace():
                ccolor = c.colorcode()
                self.assertEqual(ccolor, wanted_colorcode,
                                 msg='Output text %r (char #%d) is colored %s, not %s'
                                     % (coloredtext, num, lookup_colorname(ccolor), colorname))
    def assertColorFromTags(self, coloredtext, tags):
        # Compare per-character colors against a parallel string of tag
        # letters; whitespace in the output is not checked, and whitespace
        # in the tag string means "neutral".
        for (char, tag) in izip(coloredtext, tags):
            if char.isspace():
                continue
            if tag.isspace():
                tag = 'n' # neutral
            self.assertEqual(char.colorcode(), lookup_colorletter(tag),
                             msg='Coloring mismatch.\nExpected coloring: %s\n'
                                 'Actually got:      %s\ncolor code:        %s'
                                 % (tags, coloredtext.colored_version(), coloredtext.colortags()))
def assertCqlverQueriesGiveColoredOutput(self, queries_and_expected_outputs,
cqlver=(cqlsh.DEFAULT_CQLVER,), **kwargs):
if not isinstance(cqlver, (tuple, list)):
cqlver = (cqlver,)
for ver in cqlver:
self.assertQueriesGiveColoredOutput(queries_and_expected_outputs, cqlver=ver, **kwargs)
    def assertQueriesGiveColoredOutput(self, queries_and_expected_outputs, **kwargs):
        """
        Allow queries and expected output to be specified in structured tuples,
        along with expected color information.
        """
        # Each expected output is a dedented block of alternating lines:
        # plain text, then a same-length line of color-tag letters.
        with testrun_cqlsh(tty=True, **kwargs) as c:
            for query, expected in queries_and_expected_outputs:
                cqlshlog.debug('Testing %r' % (query,))
                output = c.cmd_and_response(query).lstrip("\r\n")
                c_output = ColoredText(output)
                pairs = at_a_time(dedent(expected).split('\n'), 2)
                outlines = c_output.splitlines()
                for (plain, colorcodes), outputline in zip(pairs, outlines):
                    self.assertEqual(outputline.plain().rstrip(), plain)
                    self.assertColorFromTags(outputline, colorcodes)
def test_no_color_output(self):
for termname in ('', 'dumb', 'vt100'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertNoHasColors(c.read_to_next_prompt())
    def test_no_prompt_or_colors_output(self):
        """Non-tty (piped) invocations emit no prompt and no colors, for
        any terminal type."""
        for termname in ('', 'dumb', 'vt100', 'xterm'):
            cqlshlog.debug('TERM=%r' % termname)
            query = 'select * from has_all_types limit 1;'
            output, result = testcall_cqlsh(prompt=None, env={'TERM': termname},
                                            tty=False, input=query + '\n')
            output = output.splitlines()
            for line in output:
                self.assertNoHasColors(line)
                self.assertNotRegexpMatches(line, r'^cqlsh\S*>')
            # Exactly: blank, header, rule, one data row, blank, row count.
            self.assertEqual(len(output), 6,
                             msg='output: %r' % '\n'.join(output))
            self.assertEqual(output[0], '')
            self.assertNicelyFormattedTableHeader(output[1])
            self.assertNicelyFormattedTableRule(output[2])
            self.assertNicelyFormattedTableData(output[3])
            self.assertEqual(output[4].strip(), '')
            self.assertEqual(output[5].strip(), '(1 rows)')
def test_color_output(self):
for termname in ('xterm', 'unknown-garbage'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertHasColors(c.read_to_next_prompt())
    def test_count_output(self):
        """COUNT(*) renders the header in magenta and the value in green,
        regardless of any LIMIT clause."""
        self.assertCqlverQueriesGiveColoredOutput((
            ('select count(*) from has_all_types;', """
             count
             MMMMM
            -------
                 5
                 G
            (1 rows)
            nnnnnnnn
            """),
            ('select COUNT(*) FROM empty_table;', """
             count
             MMMMM
            -------
                 0
                 G
            (1 rows)
            nnnnnnnn
            """),
            ('select COUNT(*) FROM empty_composite_table;', """
             count
             MMMMM
            -------
                 0
                 G
            (1 rows)
            nnnnnnnn
            """),
            ('select COUNT(*) FROM twenty_rows_table limit 10;', """
             count
             MMMMM
            -------
                10
                GG
            (1 rows)
            nnnnnnnn
            """),
            ('select COUNT(*) FROM twenty_rows_table limit 1000000;', """
             count
             MMMMM
            -------
                20
                GG
            (1 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
        # composite tables are counted the same way
        q = 'select COUNT(*) FROM twenty_rows_composite_table limit 1000000;'
        self.assertQueriesGiveColoredOutput((
            (q, """
             count
             MMMMM
            -------
                20
                GG
            (1 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_static_cf_output(self):
        """Static and dynamic column families color key columns red/cyan,
        value columns magenta, and cell values by type."""
        self.assertCqlverQueriesGiveColoredOutput((
            ("select a, b from twenty_rows_table where a in ('1', '13', '2');", """
             a  | b
             RR   MM
            ----+----
              1 |  1
             YY    YY
             13 | 13
             YY   YY
              2 |  2
             YY    YY
            (3 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
        self.assertQueriesGiveColoredOutput((
            ('select * from dynamic_columns;', """
             somekey | column1 | value
             RRRRRRR   CCCCCCC   MMMMM
            ---------+---------+-------------------------
                   1 |     1.2 |           one point two
                  GG   GGGGGGG   YYYYYYYYYYYYYYYYYYYYYYY
                   2 |     2.3 |         two point three
                  GG   GGGGGGG   YYYYYYYYYYYYYYYYYYYYYYY
                   3 | -0.0001 | negative ten thousandth
                  GG   GGGGGGG   YYYYYYYYYYYYYYYYYYYYYYY
                   3 |    3.46 |    three point four six
                  GG   GGGGGGG   YYYYYYYYYYYYYYYYYYYYYYY
                   3 |      99 |    ninety-nine point oh
                  GG   GGGGGGG   YYYYYYYYYYYYYYYYYYYYYYY
            (5 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_empty_cf_output(self):
        """Empty result sets still print the column headers."""
        # we print the header after CASSANDRA-6910
        self.assertCqlverQueriesGiveColoredOutput((
            ('select * from empty_table;', """
             lonelykey | lonelycol
             RRRRRRRRR   MMMMMMMMM
            -----------+-----------
            (0 rows)
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
        q = 'select * from has_all_types where num = 999;'
        # same query should show up as empty in cql 3
        self.assertQueriesGiveColoredOutput((
            (q, """
             num | asciicol | bigintcol | blobcol | booleancol | decimalcol | doublecol | floatcol | intcol | textcol | timestampcol | uuidcol | varcharcol | varintcol
             RRR   MMMMMMMM   MMMMMMMMM   MMMMMMM   MMMMMMMMMM   MMMMMMMMMM   MMMMMMMMM   MMMMMMMM   MMMMMM   MMMMMMM   MMMMMMMMMMMM   MMMMMMM   MMMMMMMMMM   MMMMMMMMM
            -----+----------+-----------+---------+------------+------------+-----------+----------+--------+---------+--------------+---------+------------+-----------
            (0 rows)
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_columnless_key_output(self):
        """Rows that consist only of a key still render, one key per line."""
        q = "select a from twenty_rows_table where a in ('1', '2', '-9192');"
        self.assertQueriesGiveColoredOutput((
            (q, """
             a
             R
            ---
             1
             Y
             2
             Y
            (2 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_numeric_output(self):
        """Numeric types are green and right-aligned; absent values leave
        neutral (empty) cells."""
        self.assertCqlverQueriesGiveColoredOutput((
            ('''select intcol, bigintcol, varintcol \
               from has_all_types \
               where num in (0, 1, 2, 3, 4);''', """
             intcol      | bigintcol            | varintcol
             MMMMMM        MMMMMMMMM              MMMMMMMMM
            -------------+----------------------+-----------------------------
                     -12 |  1234567890123456789 |  10000000000000000000000000
             GGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGGGGGGGGG
              2147483647 |  9223372036854775807 |                           9
             GGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGGGGGGGGG
                       0 |                    0 |                           0
             GGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGGGGGGGGG
             -2147483648 | -9223372036854775808 | -10000000000000000000000000
             GGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGG   GGGGGGGGGGGGGGGGGGGGGGGGGGG
                         |                      |
             nnnnnnnnnnn   nnnnnnnnnnnnnnnnnnnn   nnnnnnnnnnnnnnnnnnnnnnnnnnn
            (5 rows)
            nnnnnnnn
            """),
            ('''select decimalcol, doublecol, floatcol \
               from has_all_types \
               where num in (0, 1, 2, 3, 4);''', """
             decimalcol       | doublecol | floatcol
             MMMMMMMMMM         MMMMMMMMM   MMMMMMMM
            ------------------+-----------+----------
                  19952.11882 |         1 |     -2.1
             GGGGGGGGGGGGGGGG     GGGGGGG      GGGGG
                        1E-14 |     1e+07 |    1e+05
             GGGGGGGGGGGGGGGG     GGGGGGG      GGGGG
                          0.0 |         0 |        0
             GGGGGGGGGGGGGGGG     GGGGGGG      GGGGG
             10.0000000000000 |   -1004.1 |    1e+08
             GGGGGGGGGGGGGGGG     GGGGGGG      GGGGG
                              |           |
             nnnnnnnnnnnnnnnn     nnnnnnn      nnnnn
            (5 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_timestamp_output(self):
        """Timestamp rendering honors the TZ environment variable."""
        self.assertQueriesGiveColoredOutput((
            ('''select timestampcol from has_all_types where num = 0;''', """
             timestampcol
             MMMMMMMMMMMM
            --------------------------
             2012-05-14 12:53:20+0000
             GGGGGGGGGGGGGGGGGGGGGGGG
            (1 rows)
            nnnnnnnn
            """),
        ), env={'TZ': 'Etc/UTC'})
        # Same instant, shown in EST (UTC-5).
        self.assertQueriesGiveColoredOutput((
            ('''select timestampcol from has_all_types where num = 0;''', """
             timestampcol
             MMMMMMMMMMMM
            --------------------------
             2012-05-14 07:53:20-0500
             GGGGGGGGGGGGGGGGGGGGGGGG
            (1 rows)
            nnnnnnnn
            """),
        ), env={'TZ': 'EST'})
    def test_boolean_output(self):
        """Booleans render as green True/False."""
        self.assertCqlverQueriesGiveColoredOutput((
            ('select num, booleancol from has_all_types where num in (0, 1, 2, 3);', """
             num | booleancol
             RRR   MMMMMMMMMM
            -----+------------
               0 |       True
               G        GGGGG
               1 |       True
               G        GGGGG
               2 |      False
               G        GGGGG
               3 |      False
               G        GGGGG
            (4 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_null_output(self):
        """Missing values render as a red 'null', both for named columns
        and for 'select *'."""
        # column with metainfo but no values
        self.assertCqlverQueriesGiveColoredOutput((
            ("select k, c, notthere from undefined_values_table where k in ('k1', 'k2');", """
             k  | c  | notthere
             R    M    MMMMMMMM
            ----+----+----------
             k1 | c1 |     null
             YY   YY       RRRR
             k2 | c2 |     null
             YY   YY       RRRR
            (2 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
        # all-columns, including a metainfo column has no values (cql3)
        self.assertQueriesGiveColoredOutput((
            ("select * from undefined_values_table where k in ('k1', 'k2');", """
             k  | c  | notthere
             R    M    MMMMMMMM
            ----+----+----------
             k1 | c1 |     null
             YY   YY       RRRR
             k2 | c2 |     null
             YY   YY       RRRR
            (2 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_string_output_ascii(self):
        """ASCII strings show escape sequences for control characters,
        colored as 'magic' (mm); literal backslashes stay yellow."""
        self.assertCqlverQueriesGiveColoredOutput((
            ("select * from ascii_with_special_chars where k in (0, 1, 2, 3);", r"""
             k | val
             R   MMM
            ---+-----------------------------------------------
             0 |                                    newline:\n
             G                                      YYYYYYYYmm
             1 |                         return\rand null\x00!
             G                           YYYYYYmmYYYYYYYYmmmmY
             2 | \x00\x01\x02\x03\x04\x05control chars\x06\x07
             G   mmmmmmmmmmmmmmmmmmmmmmmmYYYYYYYYYYYYYmmmmmmmm
             3 |                      fake special chars\x00\n
             G                        YYYYYYYYYYYYYYYYYYYYYYYY
            (4 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_string_output_utf8(self):
        # many of these won't line up visually here, to keep the source code
        # here ascii-only. note that some of the special Unicode characters
        # here will render as double-width or zero-width in unicode-aware
        # terminals, but the color-checking machinery here will still treat
        # it as one character, so those won't seem to line up visually either.
        self.assertCqlverQueriesGiveColoredOutput((
            ("select * from utf8_with_special_chars where k in (0, 1, 2, 3, 4, 5, 6);", u"""
             k | val
             R   MMM
            ---+-------------------------------
             0 |                 Normal string
             G                   YYYYYYYYYYYYY
             1 |         Text with\\nnewlines\\n
             G           YYYYYYYYYmmYYYYYYYYmm
             2 |  Text with embedded \\x01 char
             G    YYYYYYYYYYYYYYYYYYYmmmmYYYYY
             3 | \u24c8\u24c5\u24ba\u24b8\u24be\u24b6\u24c1\u2008\u249e\u24a3\u249c\u24ad\u24ae and normal ones
             G   YYYYYYYYYYYYYYYYYYYYYYYYYYYYY
             4 |             double wides: \u2f91\u2fa4\u2f9a
             G               YYYYYYYYYYYYYYYYY
             5 |          zero width\u200bspace
             G            YYYYYYYYYYYYYYYY
             6 |      fake special chars\\x00\\n
             G        YYYYYYYYYYYYYYYYYYYYYYYY
            (7 rows)
            nnnnnnnn
            """.encode('utf-8')),
        ), cqlver=cqlsh.DEFAULT_CQLVER, env={'LANG': 'en_US.UTF-8'})
    def test_blob_output(self):
        """Blobs are shown as 0x... hex literals colored as 'magic' (mm)."""
        self.assertCqlverQueriesGiveColoredOutput((
            ("select num, blobcol from has_all_types where num in (0, 1, 2, 3);", r"""
             num | blobcol
             RRR   MMMMMMM
            -----+----------------------
               0 | 0x000102030405fffefd
               G   mmmmmmmmmmmmmmmmmmmm
               1 | 0xffffffffffffffffff
               G   mmmmmmmmmmmmmmmmmmmm
               2 |                   0x
               G   mmmmmmmmmmmmmmmmmmmm
               3 |                 0x80
               G   mmmmmmmmmmmmmmmmmmmm
            (4 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_prompt(self):
        """Prompt reflects the current keyspace, and is unchanged after a
        failed USE statement."""
        with testrun_cqlsh(tty=True, keyspace=None, cqlver=cqlsh.DEFAULT_CQLVER) as c:
            self.assertEqual(c.output_header.splitlines()[-1], 'cqlsh> ')
            c.send('\n')
            output = c.read_to_next_prompt().replace('\r\n', '\n')
            self.assertEqual(output, '\ncqlsh> ')
            cmd = "USE \"%s\";\n" % get_test_keyspace().replace('"', '""')
            c.send(cmd)
            output = c.read_to_next_prompt().replace('\r\n', '\n')
            self.assertEqual(output, '%scqlsh:%s> ' % (cmd, get_test_keyspace()))
            c.send('use system;\n')
            output = c.read_to_next_prompt().replace('\r\n', '\n')
            self.assertEqual(output, 'use system;\ncqlsh:system> ')
            # Failed USE: error message in red, prompt keyspace unchanged.
            c.send('use NONEXISTENTKEYSPACE;\n')
            outputlines = c.read_to_next_prompt().splitlines()
            self.assertEqual(outputlines[0], 'use NONEXISTENTKEYSPACE;')
            self.assertEqual(outputlines[2], 'cqlsh:system> ')
            midline = ColoredText(outputlines[1])
            self.assertEqual(midline.plain(),
                             'InvalidRequest: code=2200 [Invalid query] message="Keyspace \'nonexistentkeyspace\' does not exist"')
            self.assertColorFromTags(midline,
                             "RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR")
    def test_describe_keyspace_output(self):
        """DESCRIBE KEYSPACE output is valid CQL: verify its shape for each
        command spelling, then re-execute it under a new keyspace name."""
        fullcqlver = cqlsh.DEFAULT_CQLVER
        with testrun_cqlsh(tty=True, cqlver=fullcqlver) as c:
            ks = get_test_keyspace()
            qks = quote_name(ks)
            for cmd in ('describe keyspace', 'desc keyspace'):
                for givename in ('system', '', qks):
                    for semicolon in ('', ';'):
                        fullcmd = cmd + (' ' if givename else '') + givename + semicolon
                        desc = c.cmd_and_response(fullcmd)
                        self.check_describe_keyspace_output(desc, givename or qks, fullcqlver)
            # try to actually execute that last keyspace description, with a
            # new keyspace name
            new_ks_name = 'COPY_OF_' + ks
            copy_desc = desc.replace(ks, new_ks_name)
            statements = split_cql_commands(copy_desc)
            do_drop = True
            with cassandra_cursor(cql_version=fullcqlver) as curs:
                try:
                    for stmt in statements:
                        cqlshlog.debug('TEST EXEC: %s' % stmt)
                        curs.execute(stmt)
                finally:
                    # Always clean up the copied keyspace.
                    curs.execute('use system')
                    if do_drop:
                        curs.execute('drop keyspace %s' % quote_name(new_ks_name))
    def check_describe_keyspace_output(self, output, qksname, fullcqlver):
        # Minimal sanity checks: CREATE KEYSPACE header, replication
        # options, and a terminating semicolon.
        expected_bits = [r'(?im)^CREATE KEYSPACE %s WITH\b' % re.escape(qksname),
                         r';\s*$',
                         r'\breplication = {\'class\':']
        for expr in expected_bits:
            self.assertRegexpMatches(output, expr)
    def test_describe_columnfamily_output(self):
        """DESCRIBE TABLE output must exactly match the expected CREATE
        TABLE statement, with no colors."""
        # we can change these to regular expressions if/when it makes sense
        # to do so; these will likely be subject to lots of adjustments.
        # note columns are now comparator-ordered instead of original-order.
        table_desc3 = dedent("""
            CREATE TABLE %s.has_all_types (
                num int PRIMARY KEY,
                asciicol ascii,
                bigintcol bigint,
                blobcol blob,
                booleancol boolean,
                decimalcol decimal,
                doublecol double,
                floatcol float,
                intcol int,
                textcol text,
                timestampcol timestamp,
                uuidcol uuid,
                varcharcol text,
                varintcol varint
            ) WITH bloom_filter_fp_chance = 0.01
                AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
                AND comment = ''
                AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
                AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
                AND dclocal_read_repair_chance = 0.1
                AND default_time_to_live = 0
                AND gc_grace_seconds = 864000
                AND max_index_interval = 2048
                AND memtable_flush_period_in_ms = 0
                AND min_index_interval = 128
                AND read_repair_chance = 0.0
                AND speculative_retry = '99.0PERCENTILE';
            """ % quote_name(get_test_keyspace()))
        with testrun_cqlsh(tty=True, cqlver=cqlsh.DEFAULT_CQLVER) as c:
            for cmdword in ('describe table', 'desc columnfamily'):
                for semicolon in (';', ''):
                    output = c.cmd_and_response('%s has_all_types%s' % (cmdword, semicolon))
                    self.assertNoHasColors(output)
                    self.assertEqual(output, table_desc3)
    def test_describe_columnfamilies_output(self):
        """DESC TABLES groups table names under per-keyspace headers when no
        keyspace is active, and prints a flat list inside a keyspace."""
        output_re = r'''
            \n
            Keyspace [ ] (?P<ksname> \S+ ) \n
            -----------* \n
            (?P<cfnames> .*? )
            \n
            '''
        ks = get_test_keyspace()
        with testrun_cqlsh(tty=True, keyspace=None, cqlver=cqlsh.DEFAULT_CQLVER) as c:
            # when not in a keyspace
            for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
                for semicolon in (';', ''):
                    ksnames = []
                    output = c.cmd_and_response(cmdword + semicolon)
                    self.assertNoHasColors(output)
                    self.assertRegexpMatches(output, '(?xs) ^ ( %s )+ $' % output_re)
                    for section in re.finditer('(?xs)' + output_re, output):
                        ksname = section.group('ksname')
                        ksnames.append(ksname)
                        cfnames = section.group('cfnames')
                        self.assertNotIn('\n\n', cfnames)
                        if ksname == ks:
                            self.assertIn('ascii_with_special_chars', cfnames)
                    self.assertIn('system', ksnames)
                    self.assertIn(quote_name(ks), ksnames)
            # when in a keyspace
            c.send('USE %s;\n' % quote_name(ks))
            c.read_to_next_prompt()
            for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
                for semicolon in (';', ''):
                    output = c.cmd_and_response(cmdword + semicolon)
                    self.assertNoHasColors(output)
                    self.assertEqual(output[0], '\n')
                    self.assertEqual(output[-1], '\n')
                    self.assertNotIn('Keyspace %s' % quote_name(ks), output)
                    self.assertIn('undefined_values_table', output)
    def test_describe_cluster_output(self):
        """DESCRIBE CLUSTER shows cluster name and partitioner, plus ring
        ownership information when a keyspace is active."""
        output_re = r'''(?x)
            ^
            \n
            Cluster: [ ] (?P<clustername> .* ) \n
            Partitioner: [ ] (?P<partitionername> .* ) \n
            \n
            '''
        ringinfo_re = r'''
            Range[ ]ownership: \n
            (
              [ ] .*? [ ][ ] \[ ( \d+ \. ){3} \d+ \] \n
            )+
            \n
            '''
        with testrun_cqlsh(tty=True, keyspace=None, cqlver=cqlsh.DEFAULT_CQLVER) as c:
            # not in a keyspace
            for semicolon in ('', ';'):
                output = c.cmd_and_response('describe cluster' + semicolon)
                self.assertNoHasColors(output)
                self.assertRegexpMatches(output, output_re + '$')
            c.send('USE %s;\n' % quote_name(get_test_keyspace()))
            c.read_to_next_prompt()
            for semicolon in ('', ';'):
                output = c.cmd_and_response('describe cluster' + semicolon)
                self.assertNoHasColors(output)
                self.assertRegexpMatches(output, output_re + ringinfo_re + '$')
    def test_describe_schema_output(self):
        """DESC FULL SCHEMA dumps every keyspace as plain (uncolored) CQL."""
        with testrun_cqlsh(tty=True) as c:
            for semicolon in ('', ';'):
                output = c.cmd_and_response('desc full schema' + semicolon)
                self.assertNoHasColors(output)
                self.assertRegexpMatches(output, '^\nCREATE KEYSPACE')
                self.assertIn("\nCREATE KEYSPACE system WITH replication = {'class': 'LocalStrategy'} AND durable_writes = true;\n",
                              output)
                self.assertRegexpMatches(output, ';\s*$')
    def test_show_output(self):
        """SHOW VERSION and SHOW HOST print the expected banner lines."""
        with testrun_cqlsh(tty=True) as c:
            output = c.cmd_and_response('show version;')
            self.assertRegexpMatches(output,
                    '^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Native protocol \S+\]$')
            output = c.cmd_and_response('show host;')
            self.assertHasColors(output)
            self.assertRegexpMatches(output, '^Connected to .* at %s:%d\.$'
                                             % (re.escape(TEST_HOST), TEST_PORT))
    def test_eof_prints_newline(self):
        """Ctrl-D at the prompt emits a newline before cqlsh exits."""
        with testrun_cqlsh(tty=True) as c:
            c.send(CONTROL_D)
            out = c.read_lines(1)[0].replace('\r', '')
            self.assertEqual(out, '\n')
            # After exit, further reads must fail.
            with self.assertRaises(BaseException) as cm:
                c.read_lines(1)
            self.assertIn(type(cm.exception), (EOFError, OSError))
    def test_exit_prints_no_newline(self):
        """'exit' (with or without ';') echoes only the command itself."""
        for semicolon in ('', ';'):
            with testrun_cqlsh(tty=True) as c:
                cmd = 'exit%s\n' % semicolon
                c.send(cmd)
                out = c.read_lines(1)[0].replace('\r', '')
                self.assertEqual(out, cmd)
                with self.assertRaises(BaseException) as cm:
                    c.read_lines(1)
                self.assertIn(type(cm.exception), (EOFError, OSError))
    def test_help_types(self):
        # Smoke test: 'help types' must execute without error.
        with testrun_cqlsh(tty=True) as c:
            c.cmd_and_response('help types')
    def test_help(self):
        # TODO: not yet implemented
        pass
    def test_printing_parse_error(self):
        # TODO: not yet implemented
        pass
    def test_printing_lex_error(self):
        # TODO: not yet implemented
        pass
    def test_multiline_statements(self):
        # TODO: not yet implemented
        pass
    def test_cancel_statement(self):
        # TODO: not yet implemented
        pass
    def test_printing_integrity_error(self):
        # TODO: not yet implemented
        pass
    def test_printing_cql_error(self):
        # TODO: not yet implemented
        pass
    def test_empty_line(self):
        # TODO: not yet implemented
        pass
    def test_user_types_output(self):
        """UDT values render like maps: braces/punctuation blue, field
        values yellow, nulls red."""
        self.assertCqlverQueriesGiveColoredOutput((
            ("select addresses from users;", r"""
             addresses
             MMMMMMMMM
            --------------------------------------------------------------------------------------------------------------------------------------------
             {{city: 'Chelyabinsk', address: '3rd street', zip: null}, {city: 'Chigirinsk', address: null, zip: '676722'}}
             BBYYYYBBYYYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYBBYYYBBRRRRBBBBYYYYBBYYYYYYYYYYYYBBYYYYYYYBBRRRRBBYYYBBYYYYYYYYBB
             {{city: 'Austin', address: '902 East 5th St. #202', zip: '78702'}, {city: 'Sunnyvale', address: '292 Gibraltar Drive #107', zip: '94089'}}
             BBYYYYBBYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBBBBYYYYBBYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBB
            (2 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
        self.assertCqlverQueriesGiveColoredOutput((
            ("select phone_numbers from users;", r"""
             phone_numbers
             MMMMMMMMMMMMM
            -------------------------------------------------------------------------------------
             {{country: null, number: '03'}, {country: '+7', number: null}}
             BBYYYYYYYBBRRRRBBYYYYYYBBYYYYBBBBYYYYYYYBBYYYYBBYYYYYYBBRRRRBB
             {{country: '+1', number: '512-537-7809'}, {country: '+44', number: '208 622 3021'}}
             BBYYYYYYYBBYYYYBBYYYYYYBBYYYYYYYYYYYYYYBBBBYYYYYYYBBYYYYYBBYYYYYYBBYYYYYYYYYYYYYYBB
            (2 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
    def test_user_types_with_collections(self):
        """UDTs containing collections color nested literals element-wise
        (sets/maps inside the type; numbers green, strings yellow)."""
        self.assertCqlverQueriesGiveColoredOutput((
            ("select info from songs;", r"""
             info
             MMMM
            -------------------------------------------------------------------------------------------------------------------------------------------------------------------
             {founded: 188694000, members: {'Adrian Smith', 'Bruce Dickinson', 'Dave Murray', 'Janick Gers', 'Nicko McBrain', 'Steve Harris'}, description: 'Pure evil metal'}
             BYYYYYYYBBGGGGGGGGGBBYYYYYYYBBBYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYBBBYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYB
            (1 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
        self.assertCqlverQueriesGiveColoredOutput((
            ("select tags from songs;", r"""
             tags
             MMMM
            -------------------------------------------------
             {tags: {'genre': 'metal', 'origin': 'england'}}
             BYYYYBBBYYYYYYYBBYYYYYYYBBYYYYYYYYBBYYYYYYYYYBB
            (1 rows)
            nnnnnnnn
            """),
        ), cqlver=cqlsh.DEFAULT_CQLVER)
| |
# AnalogClock's main class
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.
#
# For more info please see the __init__.py file.
import wx
from styles import *
from helpers import Dyer, Face, Hand, HandSet, TickSet, Box
from setup import Setup
#----------------------------------------------------------------------
class AnalogClock(wx.PyWindow):
"""An analog clock."""
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.NO_BORDER, name="AnalogClock",
                 clockStyle=DEFAULT_CLOCK_STYLE,
                 minutesStyle=TICKS_CIRCLE, hoursStyle=TICKS_POLY):
        """Create the clock window and start its update timer.

        clockStyle/minutesStyle/hoursStyle are bit flags from the styles
        module controlling which elements are shown and how ticks are drawn.
        """
        wx.PyWindow.__init__(self, parent, id, pos, size, style, name)
        # Base size for scale calc purposes.
        self.basesize = wx.Size(348, 348)
        # Store some references.
        self.clockStyle = clockStyle
        self.minutesStyle = minutesStyle
        self.hoursStyle = hoursStyle
        # Public aliases for the private drawing/layout methods.
        self.DrawHands = self._drawHands
        self.DrawBox = self._drawBox
        self.RecalcCoords = self._recalcCoords
        self.shadowOffset = 3
        self.allHandStyles = [SHOW_HOURS_HAND,
                              SHOW_MINUTES_HAND,
                              SHOW_SECONDS_HAND]
        # Initialize clock face.
        #
        # By default we don't use colours or borders on the clock face.
        bg = self.GetBackgroundColour()
        face = Face(dyer=Dyer(bg, 0, bg))
        # Initialize tick marks.
        #
        # TickSet is a set of tick marks; there's always two TickSets defined
        # regardless whether they're being shown or not.
        ticksM = TickSet(self, style=minutesStyle, size=5, kind="minutes")
        ticksH = TickSet(self, style=hoursStyle, size=25, kind="hours",
                         rotate=clockStyle&ROTATE_TICKS)
        # Box holds the clock face and tick marks.
        self.Box = Box(self, face, ticksM, ticksH)
        # Initialize hands.
        #
        # HandSet is the set of hands; there's always one HandSet defined
        # regardless whether hands are being shown or not.
        #
        # A 'lenfac = 0.95', e.g., means that the length of that hand will
        # be 95% of the maximum allowed hand length ('nice' maximum length).
        handH = Hand(size=7, lenfac=0.7)
        handM = Hand(size=5, lenfac=0.95)
        handS = Hand(size=1, lenfac=0.95)
        self.Hands = HandSet(self, handH, handM, handS)
        # Create the customization dialog.
        self.Setup = None
        # Make a context menu.
        popup1 = wx.NewId()
        popup2 = wx.NewId()
        cm = self.cm = wx.Menu()
        cm.Append(popup1, "Customize...")
        cm.Append(popup2, "About...")
        # Set event handlers.
        self.Bind(wx.EVT_SIZE, self._OnSize)
        self.Bind(wx.EVT_PAINT, self._OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, lambda evt: None)
        self.Bind(wx.EVT_TIMER, self._OnTimer)
        self.Bind(wx.EVT_WINDOW_DESTROY, self._OnDestroyWindow)
        self.Bind(wx.EVT_CONTEXT_MENU, self._OnContextMenu)
        self.Bind(wx.EVT_MENU, self._OnShowSetup, id=popup1)
        self.Bind(wx.EVT_MENU, self._OnShowAbout, id=popup2)
        # Set initial size based on given size, or best size
        self.SetInitialSize(size)
        # Do initial drawing (in case there is not an initial size event)
        self.RecalcCoords(self.GetSize())
        self.DrawBox()
        # Initialize the timer that drives the update of the clock face.
        # Update every half second to ensure that there is at least one true
        # update during each realtime second.
        self.timer = wx.Timer(self)
        self.timer.Start(500)
def DoGetBestSize(self):
# Just pull a number out of the air. If there is a way to
# calculate this then it should be done...
size = wx.Size(50,50)
self.CacheBestSize(size)
return size
def _OnSize(self, evt):
size = self.GetClientSize()
if size.x < 1 or size.y < 1:
return
self.RecalcCoords(size)
self.DrawBox()
    def _OnPaint(self, evt):
        # Double-buffered paint: blit the pre-rendered face bitmap and
        # draw the hands on top of it (see _drawHands).
        dc = wx.BufferedPaintDC(self)
        self.DrawHands(dc)
    def _OnTimer(self, evt):
        # Invalidate without erasing the background (the paint handler
        # redraws everything), then force an immediate repaint.
        self.Refresh(False)
        self.Update()
    def _OnDestroyWindow(self, evt):
        # Stop and drop the timer so no further events fire after the
        # window is destroyed.
        self.timer.Stop()
        del self.timer
    def _OnContextMenu(self, evt):
        # Show the right-click menu built in __init__ (Customize/About).
        self.PopupMenu(self.cm)
def _OnShowSetup(self, evt):
if self.Setup is None:
self.Setup = Setup(self)
self.Setup.Show()
self.Setup.Raise()
def _OnShowAbout(self, evt):
msg = "AnalogClock\n\n" \
"by Several folks on wxPython-users\n" \
"with enhancements from E. A. Tacao."
title = "About..."
style = wx.OK|wx.ICON_INFORMATION
dlg = wx.MessageDialog(self, msg, title, style)
dlg.ShowModal()
dlg.Destroy()
    def _recalcCoords(self, size):
        """
        Recalculates all coordinates/geometry and inits the faceBitmap
        to make sure the buffer is always the same size as the window.
        """
        self.faceBitmap = wx.EmptyBitmap(*size.Get())
        # Recalc all coords.
        # Uniform scale factor: fit the base design size into the window
        # while preserving the aspect ratio.
        scale = min([float(size.width) / self.basesize.width,
                     float(size.height) / self.basesize.height])
        # Float division keeps the centre accurate for odd pixel sizes.
        centre = wx.Point(size.width / 2., size.height / 2.)
        self.Box.RecalcCoords(size, centre, scale)
        self.Hands.RecalcCoords(size, centre, scale)
        # Try to find a 'nice' maximum length for the hands so that they won't
        # overlap the tick marks. OTOH, if you do want to allow overlapping the
        # lenfac value (defined on __init__ above) has to be set to
        # something > 1.
        niceradius = self.Box.GetNiceRadiusForHands(centre)
        self.Hands.SetMaxRadius(niceradius)
    def _drawBox(self):
        """Draws clock face and tick marks onto the faceBitmap."""
        # Render into the cached bitmap; _drawHands blits it on each paint.
        dc = wx.BufferedDC(None, self.faceBitmap)
        dc.SetBackground(wx.Brush(self.GetBackgroundColour(), wx.SOLID))
        dc.Clear()
        self.Box.Draw(dc)
    def _drawHands(self, dc):
        """
        Draws the face bitmap, created on the last DrawBox call, and
        clock hands.
        """
        # Face first (background), hands on top.
        dc.DrawBitmap(self.faceBitmap, 0, 0)
        self.Hands.Draw(dc)
    # Public methods --------------------------------------------------
    #
    # The getters below only delegate: hand settings live in the HandSet
    # (self.Hands); tick and face settings live in the Box (self.Box).
    def GetHandSize(self, target=ALL):
        """Gets thickness of hands."""
        return self.Hands.GetSize(target)
    def GetHandFillColour(self, target=ALL):
        """Gets fill colours of hands."""
        return self.Hands.GetFillColour(target)
    def GetHandBorderColour(self, target=ALL):
        """Gets border colours of hands."""
        return self.Hands.GetBorderColour(target)
    def GetHandBorderWidth(self, target=ALL):
        """Gets border widths of hands."""
        return self.Hands.GetBorderWidth(target)
    def GetTickSize(self, target=ALL):
        """Gets sizes of ticks."""
        return self.Box.GetTickSize(target)
    def GetTickFillColour(self, target=ALL):
        """Gets fill colours of ticks."""
        return self.Box.GetTickFillColour(target)
    def GetTickBorderColour(self, target=ALL):
        """Gets border colours of ticks."""
        return self.Box.GetTickBorderColour(target)
    def GetTickBorderWidth(self, target=ALL):
        """Gets border widths of ticks."""
        return self.Box.GetTickBorderWidth(target)
    def GetTickPolygon(self, target=ALL):
        """
        Gets lists of points to be used as polygon shapes
        when using the TICKS_POLY style.
        """
        return self.Box.GetTickPolygon(target)
    def GetTickFont(self, target=ALL):
        """
        Gets fonts for tick marks when using TICKS_DECIMAL or
        TICKS_ROMAN style.
        """
        return self.Box.GetTickFont(target)
    def GetTickOffset(self, target=ALL):
        """Gets the distance of tick marks for hours from border."""
        return self.Box.GetTickOffset(target)
    def GetFaceFillColour(self):
        """Gets fill colours of watch."""
        return self.Box.Face.GetFillColour()
    def GetFaceBorderColour(self):
        """Gets border colours of watch."""
        return self.Box.Face.GetBorderColour()
    def GetFaceBorderWidth(self):
        """Gets border width of watch."""
        return self.Box.Face.GetBorderWidth()
    def GetShadowColour(self):
        """Gets the colour to be used to draw shadows."""
        # Hands and Box share one shadow colour (see SetShadowColour),
        # so querying the Box is sufficient.
        a_clock_part = self.Box
        return a_clock_part.GetShadowColour()
    def GetClockStyle(self):
        """Returns the current clock style."""
        return self.clockStyle
    def GetTickStyle(self, target=ALL):
        """Gets the tick style(s)."""
        return self.Box.GetTickStyle(target)
def Reset(self):
"""
Forces an immediate recalculation and redraw of all clock
elements.
"""
size = self.GetClientSize()
if size.x < 1 or size.y < 1:
return
self.RecalcCoords(size)
self.DrawBox()
self.Refresh(False)
    def SetHandSize(self, size, target=ALL):
        """Sets thickness of hands."""
        # Hand setters do not call Reset(): hands are repainted on every
        # timer-driven paint anyway (see _OnTimer/_OnPaint).
        self.Hands.SetSize(size, target)
    def SetHandFillColour(self, colour, target=ALL):
        """Sets fill colours of hands."""
        self.Hands.SetFillColour(colour, target)
    def SetHandBorderColour(self, colour, target=ALL):
        """Sets border colours of hands."""
        self.Hands.SetBorderColour(colour, target)
    def SetHandBorderWidth(self, width, target=ALL):
        """Sets border widths of hands."""
        self.Hands.SetBorderWidth(width, target)
    def SetTickSize(self, size, target=ALL):
        """Sets sizes of ticks."""
        # Tick/face setters rebuild the cached face bitmap via Reset().
        self.Box.SetTickSize(size, target)
        self.Reset()
    def SetTickFillColour(self, colour, target=ALL):
        """Sets fill colours of ticks."""
        self.Box.SetTickFillColour(colour, target)
        self.Reset()
    def SetTickBorderColour(self, colour, target=ALL):
        """Sets border colours of ticks."""
        self.Box.SetTickBorderColour(colour, target)
        self.Reset()
    def SetTickBorderWidth(self, width, target=ALL):
        """Sets border widths of ticks."""
        self.Box.SetTickBorderWidth(width, target)
        self.Reset()
    def SetTickPolygon(self, polygon, target=ALL):
        """
        Sets lists of points to be used as polygon shapes
        when using the TICKS_POLY style.
        """
        self.Box.SetTickPolygon(polygon, target)
        self.Reset()
    def SetTickFont(self, font, target=ALL):
        """
        Sets fonts for tick marks when using text-based tick styles
        such as TICKS_DECIMAL or TICKS_ROMAN.
        """
        self.Box.SetTickFont(font, target)
        self.Reset()
    def SetTickOffset(self, offset, target=ALL):
        """Sets the distance of tick marks for hours from border."""
        self.Box.SetTickOffset(offset, target)
        self.Reset()
    def SetFaceFillColour(self, colour):
        """Sets fill colours of watch."""
        self.Box.Face.SetFillColour(colour)
        self.Reset()
    def SetFaceBorderColour(self, colour):
        """Sets border colours of watch."""
        self.Box.Face.SetBorderColour(colour)
        self.Reset()
    def SetFaceBorderWidth(self, width):
        """Sets border width of watch."""
        self.Box.Face.SetBorderWidth(width)
        self.Reset()
    def SetShadowColour(self, colour):
        """Sets the colour to be used to draw shadows."""
        # Shadows are drawn by both the hands and the box, so both
        # receive the same colour.
        self.Hands.SetShadowColour(colour)
        self.Box.SetShadowColour(colour)
        self.Reset()
    def SetClockStyle(self, style):
        """
        Set the clock style, according to the options below.

        ====================  ================================
        SHOW_QUARTERS_TICKS   Show marks for hours 3, 6, 9, 12
        SHOW_HOURS_TICKS      Show marks for all hours
        SHOW_MINUTES_TICKS    Show marks for minutes
        SHOW_HOURS_HAND       Show hours hand
        SHOW_MINUTES_HAND     Show minutes hand
        SHOW_SECONDS_HAND     Show seconds hand
        SHOW_SHADOWS          Show hands and marks shadows
        ROTATE_TICKS          Align tick marks to watch
        OVERLAP_TICKS         Draw tick marks for minutes even
                              when they match the hours marks.
        ====================  ================================
        """
        self.clockStyle = style
        self.Box.SetIsRotated(style & ROTATE_TICKS)
        self.Reset()
    def SetTickStyle(self, style, target=ALL):
        """
        Set the tick style, according to the options below.

        =================  ======================================
        TICKS_NONE         Don't show tick marks.
        TICKS_SQUARE       Use squares as tick marks.
        TICKS_CIRCLE       Use circles as tick marks.
        TICKS_POLY         Use a polygon as tick marks. A
                           polygon can be passed using
                           SetTickPolygon, otherwise the default
                           polygon will be used.
        TICKS_DECIMAL      Use decimal numbers as tick marks.
        TICKS_ROMAN        Use Roman numbers as tick marks.
        TICKS_BINARY       Use binary numbers as tick marks.
        TICKS_HEX          Use hexadecimal numbers as tick marks.
        =================  ======================================
        """
        self.Box.SetTickStyle(style, target)
        self.Reset()
    def SetBackgroundColour(self, colour):
        """Overriden base wx.Window method."""
        wx.Window.SetBackgroundColour(self, colour)
        self.Reset()
    def SetForegroundColour(self, colour):
        """
        Overriden base wx.Window method. This method sets a colour for
        all hands and ticks at once.
        """
        wx.Window.SetForegroundColour(self, colour)
        self.SetHandFillColour(colour)
        self.SetHandBorderColour(colour)
        self.SetTickFillColour(colour)
        self.SetTickBorderColour(colour)
        self.Reset()
    def SetWindowStyle(self, *args, **kwargs):
        """Overriden base wx.Window method."""
        # Bounce the size to force a full relayout after the style
        # change; Freeze/Thaw suppress intermediate redraws.
        size = self.GetSize()
        self.Freeze()
        wx.Window.SetWindowStyle(self, *args, **kwargs)
        self.SetSize((10, 10))
        self.SetSize(size)
        self.Thaw()
    def SetWindowStyleFlag(self, *args, **kwargs):
        """Overriden base wx.Window method."""
        # Alias for SetWindowStyle.
        self.SetWindowStyle(*args, **kwargs)
# For backwards compatibility -----------------------------------------
class AnalogClockWindow(AnalogClock):
    """
    A simple derived class that provides some backwards compatibility
    with the old analogclock module.

    Each method translates an old-API call into the equivalent
    AnalogClock call; a few old options are accepted but ignored.
    """
    def SetTickShapes(self, tsh, tsm=None):
        # The old API allowed separate hour/minute polygons; only the
        # hour polygon is honoured now (tsm is accepted but ignored).
        self.SetTickPolygon(tsh)

    def SetHandWeights(self, h=None, m=None, s=None):
        # Thickness of the hour/minute/second hands.
        if h:
            self.SetHandSize(h, HOUR)
        if m:
            self.SetHandSize(m, MINUTE)
        if s:
            self.SetHandSize(s, SECOND)

    def SetHandColours(self, h=None, m=None, s=None):
        # Passing only `h` applies that colour to all three hands.
        if h and not m and not s:
            m = h
            s = h
        if h:
            self.SetHandBorderColour(h, HOUR)
            self.SetHandFillColour(h, HOUR)
        if m:
            self.SetHandBorderColour(m, MINUTE)
            self.SetHandFillColour(m, MINUTE)
        if s:
            self.SetHandBorderColour(s, SECOND)
            self.SetHandFillColour(s, SECOND)

    def SetTickColours(self, h=None, m=None):
        # Minute ticks default to the hour tick colour.
        if not m:
            m = h
        if h:
            self.SetTickBorderColour(h, HOUR)
            self.SetTickFillColour(h, HOUR)
        if m:
            self.SetTickBorderColour(m, MINUTE)
            self.SetTickFillColour(m, MINUTE)

    def SetTickSizes(self, h=None, m=None):
        if h:
            self.SetTickSize(h, HOUR)
        if m:
            self.SetTickSize(m, MINUTE)

    def SetTickFontss(self, h=None, m=None):
        # NOTE: the trailing "ss" typo is preserved for backwards
        # compatibility with existing callers of the old module.
        if h:
            self.SetTickFont(h, HOUR)
        if m:
            self.SetTickFont(m, MINUTE)

    def SetMinutesOffset(self, o):
        # No longer supported; accepted for API compatibility only.
        pass

    def SetShadowColour(self, s):
        # Old single-argument form is ignored; the new API is the
        # inherited AnalogClock.SetShadowColour.
        pass

    def SetWatchPenBrush(self, p=None, b=None):
        # Map the old pen/brush pair onto face border/fill settings.
        if p:
            self.SetFaceBorderColour(p.GetColour())
            self.SetFaceBorderWidth(p.GetWidth())
        if b:
            self.SetFaceFillColour(b.GetColour())

    def SetClockStyle(self, style):
        # The old module always showed all three hands.
        style |= SHOW_HOURS_HAND|SHOW_MINUTES_HAND|SHOW_SECONDS_HAND
        AnalogClock.SetClockStyle(self, style)

    def SetTickStyles(self, h=None, m=None):
        if h:
            self.SetTickStyle(h, HOUR)
        if m:
            # Bug fix: this previously passed `h`, so the minute ticks
            # silently received the hour style.
            self.SetTickStyle(m, MINUTE)
# Test stuff ----------------------------------------------------------
if __name__ == "__main__":
    # Python 2 print statement: this demo block predates print().
    print wx.VERSION_STRING
    # Minimal app hosting a single clock, for manual/visual testing.
    class AcDemoApp(wx.App):
        def OnInit(self):
            frame = wx.Frame(None, -1, "AnalogClock", size=(375, 375))
            clock = AnalogClock(frame)
            frame.CentreOnScreen()
            frame.Show()
            return True
    acApp = AcDemoApp(0)
    acApp.MainLoop()
#
##
### eof
| |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: webapps/__init__.py
# Purpose: music21 functions for implementing web interfaces
#
# Authors: Lars Johnson
# Michael Scott Cuthbert
#
# Copyright: (c) 2012-14 The music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Webapps is a module designed for using music21 with a webserver.
This file includes the classes and functions used to parse and process requests to music21 running on a server.
For information about how to set up a server to use music21, look at the files in webapps.server
For examples of application-specific commands and templates, see webapps.apps
For details about various output template options available, see webapps.templates
**Overview of Processing a Request**
1. The GET and POST data from the request are combined into an agenda object.
The POST data can be in the formats ``'application/json', 'multipart/form-data' or
'application/x-www-form-urlencoded'``.
For more information, see the documentation for Agenda and makeAgendaFromRequest
2. If an appName is specified, additional data and commands are added to the agenda.
For more information, see the applicationInitializers in apps.py.
3. A CommandProcessor is created for the agenda
4. The processor parses its dataDict into primitives or music21 objects and saves them
to a parsedDataDict. For more information, see ``commandProcessor._parseData()``
5. The processor executes its commandList, modifying its internal parsedDataDict.
For more information, see :meth:`~music21.webapps.CommandProcessor.executeCommands`
6. If outputTemplate is specified, the processor uses a template to generate its output.
For more information, see :meth:`~music21.webapps.CommandProcessor.getOutput` and the templates in templates.py
7. Otherwise, the data will be returned as JSON, where the variables in the agenda's
returnDict specify which variables to include in the returned JSON.
8. If an error occurs, an error message will be returned to the user
**Full JSON Example:**
Below is an example of a complete JSON request::
{
"dataDict": {
"myNum": {
"fmt": "int",
"data": "23"
}
},
"returnDict": {
"myNum": "int",
"ho": "int"
},
"commandList": [
{
"function": "corpus.parse",
"argList": [
"'bwv7.7'"
],
"resultVar": "sc"
},
{
"method": "transpose",
"argList": [
"'p5'"
],
"caller": "sc",
"resultVar": "sc"
},
{
"attribute": "flat",
"caller": "sc",
"resultVar": "scFlat"
},
{
"attribute": "highestOffset",
"caller": "scFlat",
"resultVar": "ho"
}
]
}
'''
import collections
import sys
import unittest
# music21 imports
from music21 import common
from music21 import converter
from music21 import stream #@UnusedImport
from music21 import corpus #@UnusedImport
from music21 import note #@UnusedImport
from music21 import features #@UnusedImport
from music21 import harmony #@UnusedImport
from music21 import clef #@UnusedImport
from music21 import tempo #@UnusedImport
from music21.alpha.theoryAnalysis import theoryAnalyzer #@UnusedImport
from music21.ext import six
if six.PY2:
import apps
import commands
import templates
else:
from music21.alpha.webapps import templates # @Reimport
from music21.alpha.webapps import apps # @Reimport
from music21.alpha.webapps import commands # @Reimport
# python library imports
import json
import zipfile #@UnusedImport
import cgi
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
import re #@UnusedImport
import traceback
from music21.ext.six import StringIO
if six.PY3:
import io
file = io.IOBase # @ReservedAssignment
unicode = str # @ReservedAssignment
#-------------------------------------------------------------------------------
# Valid format types for data input to the server
availableDataFormats = ['xml',
                        'musicxml',
                        'abc',
                        'str',
                        'string',
                        'bool',
                        # Bug fix: a missing comma here used to fuse
                        # 'boolean' and 'int' into 'booleanint', so the
                        # 'boolean' fmt handled by _parseData was rejected.
                        'boolean',
                        'int',
                        'reprtext',
                        'list',
                        'float',
                        'file']
# Commands of type function (no caller) must be in this list
availableFunctions = [
    'checkLeadSheetPitches',
    'colorAllChords',
    'colorAllNotes',
    'colorResults',
    'commands.generateIntervals',
    'commands.reduction',
    'commands.runPerceivedDissonanceAnalysis',
    'commands.writeMIDIFileToServer',
    'converter.parse',
    'corpus.parse',
    'createMensuralCanon',
    'getResultsString',
    'generateChords',
    'reduction',
    'stream.transpose',
    'tempo.MetronomeMark',
    'theoryAnalyzer.identifyHiddenFifths',
    'theoryAnalyzer.identifyHiddenOctaves',
    'theoryAnalyzer.identifyParallelFifths',
    'theoryAnalyzer.identifyParallelOctaves',
]
# Commands of type method (have a caller) must be in this list
availableMethods = [
    '__getitem__',
    'augmentOrDiminish',
    'chordify',
    'insert',
    'measures',
    'transpose',
]
# Commands of type attribute must be in this list
# NOTE(review): the name is misspelled ("Attribtues"); it is kept as-is
# because renaming would break any external users of this module.
availableAttribtues = [
    'highestOffset',
    'flat',
    '_theoryScore',
    'musicxml',
]
# Output templates must be in this list (the previous comment here was a
# copy-paste of the attribute-list comment above)
availableOutputTemplates = ['templates.noteflightEmbed',
                            'templates.musicxmlText',
                            'templates.musicxmlFile',
                            'templates.vexflow',
                            'templates.braille']
#-------------------------------------------------------------------------------
def ModWSGIApplication(environ, start_response):
    '''
    Application function in proper format for a mod_wsgi Application:
    Reads the contents of a post request, and passes the data string to
    webapps.processDataString for further processing.
    For an example of how to install this application on a server see music21.webapps.server.wsgiapp.py
    The request to the application should have the following structures:

    >>> from music21.ext.six import StringIO
    >>> environ = {} # environ is usually created by the server. Manually constructing dictionary for demonstrated
    >>> wsgiInput = StringIO() # wsgi.input is usually a buffer containing the contents of a POST request. Using StringIO to demonstrate
    >>> unused = wsgiInput.write('{"dataDict":{"a":{"data":3}},"returnDict":{"a":"int"}}')
    >>> unused = wsgiInput.seek(0)
    >>> environ['wsgi.input'] = wsgiInput
    >>> environ['QUERY_STRING'] = ""
    >>> environ['DOCUMENT_ROOT'] = "/Library/WebServer/Documents"
    >>> environ['HTTP_HOST'] = "ciconia.mit.edu"
    >>> environ['SCRIPT_NAME'] = "/music21/unifiedinterface"
    >>> environ['CONTENT_TYPE'] = "application/json"
    >>> start_response = lambda status, headers: None # usually called by mod_wsgi server. Used to initiate response
    >>> alpha.webapps.ModWSGIApplication(environ, start_response)
    [...'{"dataDict": {"a": ...}, "errorList": [], "status": "success"}']
    '''
    # The POST body arrives as a file-like object; the content type
    # (sans any "; charset=..." suffix) selects how it is parsed.
    contentType = str(environ.get("CONTENT_TYPE")).split(';')[0]
    postBody = environ['wsgi.input']
    try:
        agenda = makeAgendaFromRequest(postBody, environ, contentType)
        processor = CommandProcessor(agenda)
        processor.executeCommands()
        (responseData, responseContentType) = processor.getOutput()
    except Exception as e:
        # Any unexpected failure is reported as plain text so both the
        # client and the server log see the traceback.
        # TODO: Change output based on environment variables...
        errorData = 'music21_server_error: %s\n' % e
        errorData += traceback.format_exc()
        sys.stderr.write(errorData)
        (responseData, responseContentType) = (errorData, 'text/plain')
    start_response('200 OK', [('Content-type', responseContentType),
                              ('Content-Length', str(len(responseData)))])
    return [responseData]
#-------------------------------------------------------------------------------
def makeAgendaFromRequest(requestInput, environ, requestType = None):
    '''
    Combines information from POST data and server info into an agenda object
    that can be used with the CommandProcessor.

    Takes in a file-like requestInput (has ``.read()``) containing POST data,
    a dictionary-like environ from the server containing at a minimum a value for the
    keys QUERY_STRING,
    and a requestType specifying the content-type of the POST data
    ('application/json','multipart/form-data', etc.)

    Note that variables specified via query string will be returned as a list if
    they are specified more than once (e.g. ``?b=3&b=4`` will yield ``['3', '4']``
    as the value of b).

    requestInput should be buffer from the server application. Using StringIO for demonstration

    >>> from music21.ext.six import StringIO
    >>> requestInput = StringIO()
    >>> unused = requestInput.write('{"dataDict":{"a":{"data":3}}}')
    >>> unused = requestInput.seek(0)
    >>> environ = {"QUERY_STRING":"b=3"}
    >>> agenda = alpha.webapps.makeAgendaFromRequest(requestInput, environ, 'application/json')
    >>> from pprint import pprint as pp
    >>> pp(agenda)
    {'commandList': [],
     'dataDict': {'a': {'data': 3}, 'b': {'data': '3'}},
     'returnDict': {}}

    (the ellipses above comment out the u unicode prefix in PY2)

    >>> environ2 = {"QUERY_STRING":"a=2&b=3&b=4"}
    >>> agenda2 = alpha.webapps.makeAgendaFromRequest(requestInput, environ2, 'multipart/form-data')

    Note that the 3 in a:data becomes '2' -- a string.

    >>> pp(agenda2)
    {'commandList': [],
     'dataDict': {'a': {'data': '2'}, 'b': {'data': ['3', '4']}},
     'returnDict': {}}
    '''
    agenda = Agenda()
    combinedFormFields = {}
    # Use requestType to process the POST data into the agenda
    if requestType is None:
        requestType = str(environ.get("CONTENT_TYPE")).split(';')[0]
    if requestType == 'application/json':
        combinedFormFields['json'] = requestInput.read()
    elif requestType == 'multipart/form-data':
        postFormFields = cgi.FieldStorage(requestInput, environ = environ)
        for key in postFormFields:
            # A non-None filename marks an uploaded file: keep the
            # file-like object itself rather than its text value.
            if getattr(postFormFields[key], 'filename', None) is not None:
                value = postFormFields[key].file
            else:
                value = postFormFields.getlist(key)
                if len(value) == 1:
                    value = value[0]
            combinedFormFields[key] = value
    elif requestType == 'application/x-www-form-urlencoded':
        postFormFields = urlparse.parse_qs(requestInput.read())
        for (key, value) in postFormFields.items():
            # Single-valued fields are unwrapped from their list.
            if len(value) == 1:
                value = value[0]
            combinedFormFields[key] = value
    # Load json into the agenda first
    if 'json' in combinedFormFields:
        agenda.loadJson(combinedFormFields['json'])
    # Add GET fields:
    getFormFields = urlparse.parse_qs(environ['QUERY_STRING']) # Parse GET request in URL to dict
    for (key, value) in getFormFields.items():
        if len(value) == 1:
            value = value[0]
        combinedFormFields[key] = value
    # Add remaining form fields to agenda
    for (key, value) in combinedFormFields.items():
        if key in ['dataDict','commandList','returnDict','json']: # These values can only be specified via JSON, JSON already loaded
            pass
        elif key in ['appName','outputTemplate','outputArgList']:
            agenda[key] = value
        elif isinstance(value, file): # isinstance, not type()==, so file subclasses match too
            agenda['dataDict'][key] = collections.OrderedDict([("data",value),
                                                               ("fmt","file")])
        else: # Put in data dict
            agenda['dataDict'][key] = {"data": value}
    # Allows the appName to direct final processing
    if 'appName' in agenda:
        setupApplication(agenda)
    return agenda
def setupApplication(agenda, appName = None):
    '''
    Given an agenda, determines which application is desired either from the appName parameter
    or if the appName parameter is none, from the value associated with the "appName" key in the agenda.

    If the application name is a valid application name, calls the appropriate application initializer
    from music21.webapps.apps.py on the agenda.

    Raises an Exception if no application name can be determined or if
    it is not a registered application.
    '''
    if appName is None:
        # Fall back to the agenda's own appName entry.
        if 'appName' not in agenda:
            raise Exception("appName is None and no appName key in agenda.")
        appName = agenda['appName']
    if appName not in apps.applicationInitializers:
        raise Exception("Unknown appName: " + appName)
    # Run initializer on agenda - edits it in place.
    apps.applicationInitializers[appName](agenda)
#-------------------------------------------------------------------------------
class Agenda(dict):
    '''
    Subclass of dictionary that represents data and commands to be processed by a CommandProcessor.

    The Agenda contains the following keys:

    * **'dataDict'** whose value is a dictionary specifying data to be input to the processor of the form::

        "dataDict" : {"<VARIABLE_1_NAME>": {"data": "<VARIABLE_1_DATA>",
                                            "fmt": "<VARIABLE_1_FMT>"},
                      "<VARIABLE_2_NAME>": {"data": "<VARIABLE_2_DATA>",
                                            "fmt": "<VARIABLE_2_FMT>"},
                      etc.
                      }

      where the variable formats are elements of availableDataFormats ("str","int","musicxml", etc.)

    * **'commandList'** whose value is a list specifying commands to be executed by the processor of the form::

        "commandList" : [{"<CMD_1_TYPE>": "<CMD_2_COMMAND_NAME>",
                          "resultVar": "<CMD_1_RESULT_VARIABLE>",
                          "caller": "<CMD_1_CALLER>",
                          "command": "<CMD_1_COMMAND_NAME>",
                          "argList": ['<CMD_1_ARG_1>','<CMD_1_ARG_2>'...]},
                         {"<CMD_2_TYPE>": "<CMD_2_COMMAND_NAME>",
                          "resultVar": "<CMD_2_RESULT_VARIABLE>",
                          "caller": "<CMD_2_CALLER>",
                          "argList": ['<CMD_2_ARG_1>','<CMD_2_ARG_2>'...]},
                         etc.
                         ]

      Calling :meth:`~music21.webapps.CommandProcessor.executeCommands` iterates through
      the commandList sequentially, calling the equivalent of
      ``<CMD_n_RESULT_VARAIBLE> = <CMD_n_CALLER>.<CMD_n_COMMAND_NAME>(<CMD_n_ARG_1>,<CMD_n_ARG_2>...)``
      where the command TYPE is "function", "method", or "attribute"

    * **'returnDict'** whose value is a list specifying the variables to be returned from the server::

        "returnDict" : {"<VARIABLE_1_NAME>": "<VARIABLE_1_FORMAT",
                        "<VARIABLE_2_NAME>": "<VARIABLE_2_FORMAT", etc.}

      returnDict is used to limit JSON output to only the relevant variables. If returnDict is not specified,
      the entire set of variables in the processor's environment will be returned in string format.

    * **'outputTemplate'** which specifies the return template to be used

    * **'outputArgList'** which specifies what arguments to pass the return template
    '''
    def __init__(self):
        '''
        Agenda initialization function:

        Initializes core key values 'dataDict', 'commandList', 'returnDict'

        >>> from pprint import pprint as pp
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        '''
        # Assignments go through the type-checked __setitem__ below.
        self['dataDict'] = dict()
        self['commandList'] = list()
        self['returnDict'] = dict()
        dict.__init__(self)

    def __setitem__(self, key, value):
        '''
        Raises an error if one attempts to set 'dataDict', 'returnDict', or 'commandList'
        to values that are not of the corresponding dict/list type.

        >>> from pprint import pprint as pp
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        >>> agenda['dataDict'] = {"a":{"data":2}}
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {'a': {'data': 2}}, 'returnDict': {}}
        '''
        # Guard the three structural keys against wrong container types.
        # (Previously each raise was followed by an unreachable `return`.)
        if key in ['dataDict','returnDict'] and type(value) is not dict:
            raise Exception('value for key: '+ str(key) + ' must be dict')
        elif key in ['commandList'] and type(value) is not list:
            raise Exception('value for key: '+ str(key) + ' must be list')
        dict.__setitem__(self, key, value)

    def addData(self, variableName, data, fmt = None):
        '''
        Given a variable name, data, and optionally format, constructs the proper dataDictElement structure,
        and adds it to the dataDict of the agenda.

        >>> from pprint import pprint as pp
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        >>> agenda.addData('a', 2)
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {'a': {'data': 2}}, 'returnDict': {}}
        >>> agenda.addData(variableName='b', data=[1,2,3], fmt='list')
        >>> pp(agenda)
        {'commandList': [],
         'dataDict': {'a': {'data': 2}, 'b': {'data': [1, 2, 3], 'fmt': 'list'}},
         'returnDict': {}}
        '''
        dataDictElement = {}
        dataDictElement['data'] = data
        if fmt is not None:
            dataDictElement['fmt'] = fmt
        self['dataDict'][variableName] = dataDictElement

    def getData(self, variableName):
        '''
        Given a variable name, returns the data stored in the agenda for that variable name. If no data is stored,
        returns the value None.

        >>> from pprint import pprint as pp
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        >>> agenda.getData('a') == None
        True
        >>> agenda.addData('a', 2)
        >>> agenda.getData('a')
        2
        '''
        if variableName in self['dataDict']:
            return self['dataDict'][variableName]['data']
        else:
            return None

    def addCommand(self, commandType, resultVar, caller, command, argList = None):
        '''
        Adds the specified command to the commandList of the agenda. `commandType` is either
        "function", "attribute" or "method".
        resultVar, caller, and command are strings that will result in the form shown below.
        Set an argument as None to omit it from the command element.

        argList should be a list of data encoded in an appropriate
        format (see :meth:`~music21.webapps.CommandProcessor.parseInputToPrimitive` for more information)

        ``<resultVar> = <caller>.<command>(<argList>)``

        >>> from pprint import pprint as pp
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        >>> agenda.addCommand('method','sc','sc','transpose',['p5'])
        >>> pp(agenda)
        {'commandList': [{'argList': ['p5'],
                          'caller': 'sc',
                          'method': 'transpose',
                          'resultVar': 'sc'}],
         'dataDict': {},
         'returnDict': {}}
        >>> agenda.addCommand('attribute','scFlat','sc','flat')
        >>> pp(agenda)
        {'commandList': [{'argList': ['p5'],
                          'caller': 'sc',
                          'method': 'transpose',
                          'resultVar': 'sc'},
                         {'attribute': 'flat', 'caller': 'sc', 'resultVar': 'scFlat'}],
         'dataDict': {},
         'returnDict': {}}
        '''
        commandListElement = {}
        commandListElement[commandType] = command
        if resultVar is not None:
            commandListElement['resultVar'] = resultVar
        if caller is not None:
            commandListElement['caller'] = caller
        if argList is not None:
            commandListElement['argList'] = argList
        self['commandList'].append(commandListElement)

    def setOutputTemplate(self, outputTemplate, outputArgList):
        '''
        Specifies the output template that will be used for the agenda.

        >>> from pprint import pprint as pp ## pprint stablizes dictionary order
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        >>> agenda.setOutputTemplate('templates.noteflightEmbed',['sc'])
        >>> pp(agenda)
        {'commandList': [],
         'dataDict': {},
         'outputArgList': ['sc'],
         'outputTemplate': 'templates.noteflightEmbed',
         'returnDict': {}}
        '''
        self['outputTemplate'] = outputTemplate
        self['outputArgList'] = outputArgList

    def loadJson(self, jsonRequestStr):
        '''
        Runs json.loads on jsonRequestStr and loads the resulting structure into the agenda object.

        >>> from pprint import pprint as pp ## pprint stablizes dictionary order
        >>> agenda = alpha.webapps.Agenda()
        >>> pp(agenda)
        {'commandList': [], 'dataDict': {}, 'returnDict': {}}
        >>> agenda.loadJson(alpha.webapps.sampleJsonStringSimple)
        >>> pp(agenda)
        {'commandList': [],
         'dataDict': {'myNum': {'data': '23', 'fmt': 'int'}},
         'returnDict': {'myNum': 'int'}}
        '''
        tempDict = json.loads(jsonRequestStr)
        for (key, value) in tempDict.items():
            # Each assignment goes through __setitem__, so the structural
            # keys are still type-checked here.
            self[key] = value
#-------------------------------------------------------------------------------
class CommandProcessor(object):
'''
Processes server request for music21.
Takes an Agenda (dict) as input, containing the keys::
'dataDict'
'commandList'
'returnDict'
'outputTemplate'
'outputArgList'
OMIT_FROM_DOCS
TODO: MORE DOCS!
'''
def __init__(self,agenda):
'''
OMIT_FROM_DOCS
Given an agenda
'''
self.agenda = agenda
self.rawDataDict = {}
self.parsedDataDict = {}
self.commandList = []
self.errorList = []
self.returnDict = {}
self.outputTemplate = ""
self.outputArgList = []
if "dataDict" in agenda:
self.rawDataDict = agenda['dataDict']
self._parseData()
if "commandList" in agenda:
self.commandList = agenda['commandList']
if "returnDict" in agenda:
self.returnDict = agenda['returnDict']
if "outputTemplate" in agenda:
self.outputTemplate = agenda['outputTemplate']
if "outputArgList" in agenda:
self.outputArgList = agenda['outputArgList']
    def recordError(self, errorString, exceptionObj = None):
        '''
        Adds an error to the internal errorList array and prints the whole error to stderr
        so both the user and the administrator know. Error string represents a brief, human-readable
        message decribing the error.

        Errors are appended to the errorList as a tuple (errorString, errorTraceback) where errorTraceback
        is the traceback of the exception if exceptionObj is specified, otherwise errorTraceback is the empty string
        '''
        errorTraceback = u''
        if exceptionObj is not None:
            # format_exc() reports the exception currently being handled;
            # meaningful only when called from inside an except block.
            errorTraceback += traceback.format_exc()
        # NOTE(review): on Python 3, .encode() yields bytes, so the
        # stderr writes and the string concatenation below would fail
        # there -- this path looks PY2-only; confirm before relying on it.
        errorString = errorString.encode('ascii','ignore')
        sys.stderr.write(errorString)
        sys.stderr.write(errorTraceback)
        self.errorList.append((('music21_server_error: '+errorString).encode('ascii','ignore'),errorTraceback.encode('ascii','ignore')))
def _parseData(self):
    '''
    Parses data specified as strings in self.rawDataDict into objects in
    self.parsedDataDict.

    Each element of rawDataDict is a dict with a required 'data' string
    and an optional 'fmt' tag; recognized formats come from the
    module-level availableDataFormats list.  Problems are recorded via
    recordError and the offending element is skipped.
    '''
    for (name,dataDictElement) in self.rawDataDict.items():
        if 'data' not in dataDictElement:
            self.recordError("no data specified for data element "+unicode(dataDictElement))
            continue
        dataStr = dataDictElement['data']
        if 'fmt' in dataDictElement:
            fmt = dataDictElement['fmt']
            # Reject silent re-definition of an already-parsed name.
            if name in self.parsedDataDict:
                self.recordError("duplicate definition for data named "+str(name)+" "+str(dataDictElement))
                continue
            if fmt not in availableDataFormats:
                self.recordError("invalid data format for data element "+str(dataDictElement))
                continue
            if fmt == 'string' or fmt == 'str':
                # Strings must arrive quoted; exactly two quote characters
                # are taken to mean "one pair wrapping the value".
                if dataStr.count("'") == 2: # Single Quoted String
                    data = dataStr.replace("'","") # remove excess quotes
                elif dataStr.count("\"") == 2: # Double Quoted String
                    data = dataStr.replace("\"","") # remove excess quotes
                else:
                    self.recordError("invalid string (not in quotes...) for data element "+str(dataDictElement))
                    continue
            elif fmt == 'int':
                try:
                    data = int(dataStr)
                except:
                    self.recordError("invalid integer for data element "+str(dataDictElement))
                    continue
            elif fmt in ['bool','boolean']:
                if dataStr in ['true','True']:
                    data = True
                elif dataStr in ['false','False']:
                    data = False
                else:
                    self.recordError("invalid boolean for data element "+str(dataDictElement))
                    continue
            elif fmt == 'list':
                # in this case dataStr should actually be an list object.
                if not common.isIterable(dataStr):
                    self.recordError("list format must actually be a list structure " +
                                     str(dataDictElement))
                    continue
                data = []
                for elementStr in dataStr:
                    # String elements are parsed recursively; anything
                    # already non-string is taken as-is.
                    if common.isStr(elementStr):
                        dataElement = self.parseInputToPrimitive(elementStr)
                    else:
                        dataElement = elementStr
                    data.append(dataElement)
            elif fmt == 'file':
                data = dataStr
            else:
                # Remaining formats are handed to the music21 converter;
                # bare MusicXML fragments first get the standard preamble.
                if fmt in ['xml','musicxml']:
                    if dataStr.find("<!DOCTYPE") == -1:
                        dataStr = """<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.1 Partwise//EN" "http://www.musicxml.org/dtds/partwise.dtd">""" + dataStr
                    if dataStr.find("<?xml") == -1:
                        dataStr = """<?xml version="1.0" encoding="UTF-8"?>""" + dataStr
                try:
                    data = converter.parseData(dataStr)
                except converter.ConverterException as e:
                    #self.recordError("Error parsing data variable "+name+": "+str(e)+"\n\n"+dataStr)
                    self.recordError("Error parsing data variable "+name+": "+unicode(e)+"\n\n"+dataStr,e)
                    continue
        else: # No format specified
            # Fall back to best-effort primitive parsing (int/float/bool/
            # quoted string/list) on the raw text.
            dataStr = str(dataStr)
            data = self.parseInputToPrimitive(dataStr)
        self.parsedDataDict[name] = data
def executeCommands(self):
'''
Parses JSON Commands specified in the self.commandList
In the JSON, commands are described by:
**'commandList'** whose value is a list specifying commands to be executed by the processor of the form::
"commandList" : [{"<CMD_1_TYPE>": "<CMD_2_COMMAND_NAME>",
"resultVar": "<CMD_1_RESULT_VARIABLE>",
"caller": "<CMD_1_CALLER>",
"command": "<CMD_1_COMMAND_NAME>",
"argList": ['<CMD_1_ARG_1>','<CMD_1_ARG_2>'...]},
"<CMD_2_TYPE>": "<CMD_2_COMMAND_NAME>",
"resultVar": "<CMD_2_RESULT_VARIABLE>",
"caller": "<CMD_2_CALLER>",
"argList": ['<CMD_2_ARG_1>','<CMD_2_ARG_2>'...]},
etc.
]
Calling .executeCommands() iterates through the commandList sequentially, calling the equivalent of::
<CMD_n_RESULT_VARAIBLE> = <CMD_n_CALLER>.<CMD_n_COMMAND_NAME>(<CMD_n_ARG_1>,<CMD_n_ARG_2>...)
where the command TYPE is "function" (no caller), "method" (has a caller), or "attribute"
See :meth:`~music21.webapps.CommandProcessor.executeFunctionCommand`, :meth:`~music21.webapps.CommandProcessor.executeMethodCommand`,
and :meth:`~music21.webapps.CommandProcessor.executeAttributeCommand` for more information about the format
required for those commands.
EXAMPLE::
{"commandList:"[
{"function":"corpus.parse",
"argList":["'bwv7.7'"],
"resultVar":"sc"},
{"method":"transpose",
"caller":"sc",
"argList":["'p5'"],
"resultVar":"sc"},
{"attribute":"flat",
"caller":"sc",
"resultVar":"scFlat"},
{"attribute":"higestOffset",
"caller":"scFlat",
"resultVar":"ho"}
]
}
'''
for commandElement in self.commandList:
typeKeysInCommandList = [k for k in commandElement if k in ['function', 'attribute', 'method']]
if len(typeKeysInCommandList) != 1:
self.recordError("Must have exactly one key denoting type ('function', 'attribute', or 'method'): "+str(commandElement))
continue
commandType = typeKeysInCommandList[0]
if commandType == 'function':
self.executeFunctionCommand(commandElement)
elif commandType == 'attribute':
self.executeAttributeCommand(commandElement)
elif commandType == 'method':
self.executeMethodCommand(commandElement)
else:
self.recordError("No type specified for: "+str(commandElement))
continue
return
def executeFunctionCommand(self, commandElement):
    '''
    Executes the function command specified by commandElement.

    Function command elements should be dictionaries of the form::

        {'function': "<FUNCTION_NAME>",
         'argList': ["<ARG_1>","<ARG_2>", etc.],
         'resultVar' : "<RESULT_VARIABLE>"}

    Executing it yields the equivalent of:
    ``<RESULT_VARIABLE> = <FUNCTION_NAME>(ARG_1, ARG_2, ...)``

    The keys argList and resultVar are optional.  A commandElement without
    argList will just call ``<FUNCTION_NAME>()`` with no arguments and a
    commandElement without resultVar will not assign the result of the
    function to any variable.
    '''
    # Get function name
    if 'function' not in commandElement:
        self.recordError("No function specified for function command: "+str(commandElement))
        return
    functionName = commandElement['function']
    # Allows users to create aliases for functions via the dataDict.
    # i.e. processingCommand = commands.reduction
    # then calling a command element with processingCommand(sc) will yield
    # the same result as commands.reduction(sc)
    if functionName in self.parsedDataDict:
        functionName = self.parsedDataDict[functionName]
    # Make sure function is valid for processing on webserver
    if functionName not in availableFunctions:
        self.recordError("Function "+str(functionName)+" not available on webserver:"+str(commandElement))
        return
    # Process arguments
    if 'argList' not in commandElement:
        argList = []
    else:
        argList = commandElement['argList']
        for (i,arg) in enumerate(argList):
            parsedArg = self.parseInputToPrimitive(arg)
            argList[i] = parsedArg
    # Call the function.  NOTE: eval of a request-supplied name is only
    # safe because of the availableFunctions whitelist check above --
    # keep that check intact if this is ever refactored.
    try:
        result = eval(functionName)(*argList) # safe because of check for functionName in availableFunctions
    except Exception as e:
        self.recordError("Error: "+str(e)+" executing function "+str(functionName)+" :"+str(commandElement))
        return
    # Save it if resultVar specified
    if 'resultVar' in commandElement:
        resultVarName = commandElement['resultVar']
        self.parsedDataDict[resultVarName] = result
def executeAttributeCommand(self, commandElement):
'''
Executes the attribute command specified by commandElement
Function command elements should be dictionaries of the form::
{'attribute': "<ATTRIBUTE_NAME>",
'caller': "<CALLER_VARIABLE>",
'resultVar' : "<RESULT_VARIABLE>"}
Executing it yields the equivalent of: ``<RESULT_VARIABLE> = <CALLER_VARIABLE>.<ATTRIBUTE_NAME>.``
All three keys 'attributeName', 'caller', and 'resultVar' are required.
'''
# Make sure the appropriate keys are set:
if 'attribute' not in commandElement:
self.recordError("No attribute specified for attribute command: "+str(commandElement))
return
if 'caller' not in commandElement:
self.recordError("calle must be specified with attribute :"+str(commandElement))
return
if 'resultVar' not in commandElement:
self.recordError("resultVar must be specified with attribute :"+str(commandElement))
return
# Get attribute name
attributeName = commandElement['attribute']
# Make sure attribute is valid for processing on webserver
if attributeName not in availableAttribtues:
self.recordError("Attribute "+str(attributeName)+" not available on webserver :"+str(commandElement))
return
# Get the caller and result variable names
callerName = commandElement['caller']
resultVarName = commandElement['resultVar']
# Make sure the caller is defined
if callerName not in self.parsedDataDict:
self.recordError(callerName+" not defined "+str(commandElement))
return
# Check that the caller has the desired attribute
caller = self.parsedDataDict[callerName]
if not hasattr(caller, attributeName):
self.recordError("caller "+str(callerName)+": "+str(caller) +" has no attribute "+str(attributeName)+": "+str(commandElement))
return
self.parsedDataDict[resultVarName] = getattr(caller, attributeName)
def executeMethodCommand(self, commandElement):
    '''
    Executes the method command specified by commandElement.

    Example::

        {'method': "<METHOD_NAME>",
         'caller': "<CALLER_VARIABLE>",
         'argList': ["<ARG_1>","<ARG_2>", etc.],
         'resultVar' : "<RESULT_VARIABLE>"}

    Executing it yields the equivalent of
    ``<RESULT_VARIABLE> = <CALLER_VARIABLE>.<METHOD_NAME>(ARG_1, ARG_2, ...)``

    The keys argList and resultVar are optional.  A commandElement without
    argList will just call ``<CALLER_VARIABLE>.<METHOD_NAME>()`` with no
    arguments and a commandElement without resultVar will not assign the
    result of the method to any variable.
    '''
    # Make sure the appropriate keys are set:
    if 'method' not in commandElement:
        self.recordError("No methodName specified for method command: "+str(commandElement))
        return
    if 'caller' not in commandElement:
        self.recordError("No caller specified for method command: "+str(commandElement))
        return
    # Get method name and caller name
    methodName = commandElement['method']
    callerName = commandElement['caller']
    # Make sure the method is valid for processing on webserver
    # (availableMethods is a module-level whitelist).
    if methodName not in availableMethods:
        self.recordError("Method "+str(methodName)+" not available on webserver :"+str(commandElement))
        return
    # Process arguments
    if 'argList' not in commandElement:
        argList = []
    else:
        argList = commandElement['argList']
        for (i,arg) in enumerate(argList):
            parsedArg = self.parseInputToPrimitive(arg)
            argList[i] = parsedArg
    # Make sure the caller is defined
    if callerName not in self.parsedDataDict:
        self.recordError(callerName+" not defined "+str(commandElement))
        return
    # Check that the caller has the desired method
    caller = self.parsedDataDict[callerName]
    if not hasattr(caller, methodName):
        self.recordError("caller "+str(callerName)+": "+str(caller) +" has no method "+str(methodName)+": "+str(commandElement))
        return
    if not callable(getattr(caller, methodName)):
        self.recordError(str(callerName)+"."+str(methodName) +" is not callable: "+str(commandElement))
        return
    # Call the method
    try:
        result = getattr(caller, methodName)(*argList)
    except Exception:
        # Only the exception type is reported; the object and traceback
        # are discarded deliberately.
        exc_type, unused_exc_obj, unused_exc_tb = sys.exc_info()
        self.recordError("Error: "+str(exc_type)+" executing method "+str(methodName)+" :"+str(commandElement))
        return
    # Save it if resultVar specified
    if 'resultVar' in commandElement:
        resultVarName = commandElement['resultVar']
        self.parsedDataDict[resultVarName] = result
def getResultObject(self):
'''
Returns a new object ready for json parsing with the string values of the objects
specified in self.returnDict in the formats specified in self.returnDict::
"returnDict":{
"myNum" : "int",
"ho" : "int"
}
'''
return_obj = {}
return_obj['status'] = "success"
return_obj['dataDict'] = {}
return_obj['errorList'] = []
if len(self.errorList) > 0:
return_obj['status'] = "error"
return_obj['errorList'] = self.errorList
return return_obj
if len(self.returnDict) == 0:
iterItems = [(k, 'str') for k in sorted(list(self.parsedDataDict.items()))]
else:
iterItems = sorted(list(self.returnDict.items()))
for (dataName,fmt) in iterItems:
if dataName not in self.parsedDataDict:
self.recordError("Data element "+dataName+" not defined at time of return")
continue
if fmt not in availableDataFormats:
self.recordError("Format "+fmt+" not available")
continue
data = self.parsedDataDict[dataName]
if fmt == 'string' or fmt == 'str':
dataStr = str(data)
elif fmt == 'musicxml':
dataStr = data.musicxml
elif fmt == 'reprtext':
dataStr = data._reprText()
else:
dataStr = unicode(data)
return_obj['dataDict'][dataName] = {"fmt":fmt, "data":dataStr}
if len(self.errorList) > 0:
return_obj['status'] = "error"
return_obj['errorList'] = self.errorList
return return_obj
return return_obj
def getErrorStr(self):
'''
Converts self.errorList into a string
'''
errorStr = ""
for e in self.errorList:
errorStr += e + "\n"
return errorStr
def parseInputToPrimitive(self, inpVal):
'''
Determines what format a given input is in and returns a value in that format..
First checks if it is the name of a variable defined in the parsedDataDict or the
name of an allowable function. In either of these cases, it will return the actual value
of the data or the actual function.
Next, it will check if the string is an int, float, boolean, or none, returning the appropriate value.
If it is a quoted string then it will remove the quotes on the ends and return it as a string.
If it has square braces indicating a list, the inner elements will be parsed using this same function recursively.
(Note that recursive lists like [1, 2, [3, 4]] are not yet supported
If the input corresponds to none of these types, it is returned as a string.
>>> agenda = alpha.webapps.Agenda()
>>> agenda.addData("a",2)
>>> agenda.addData("b",[1,2,3],"list")
>>> processor = alpha.webapps.CommandProcessor(agenda)
>>> processor.parseInputToPrimitive("a")
2
>>> processor.parseInputToPrimitive("b")
[1, 2, 3]
>>> processor.parseInputToPrimitive("1.0")
1.0
>>> processor.parseInputToPrimitive("2")
2
>>> processor.parseInputToPrimitive("True")
True
>>> processor.parseInputToPrimitive("False")
False
>>> processor.parseInputToPrimitive("None") == None
True
>>> processor.parseInputToPrimitive("'hi'")
'hi'
>>> processor.parseInputToPrimitive("'Madam I\'m Adam'")
"Madam I'm Adam"
>>> processor.parseInputToPrimitive("[1,2,3]")
[1, 2, 3]
>>> processor.parseInputToPrimitive("[1,'hi',3.0,True, a, justAStr]")
[1, 'hi', 3.0, True, 2, 'justAStr']
'''
returnVal = None
if common.isNum(inpVal):
return inpVal
if common.isIterable(inpVal):
return [self.parseInputToPrimitive(element) for element in inpVal]
if not common.isStr(inpVal):
self.recordError("Unknown type for parseInputToPrimitive "+str(inpVal))
strVal = inpVal
strVal = strVal.strip() # removes whitespace on ends
if strVal in self.parsedDataDict: # Used to specify data via variable name
returnVal = self.parsedDataDict[strVal]
elif strVal in availableFunctions: # Used to specify function via variable name
returnVal = strVal
else:
try:
returnVal = int(strVal)
except:
try:
returnVal = float(strVal)
except:
if strVal == "True":
returnVal = True
elif strVal == "None":
returnVal = None
elif strVal == "False":
returnVal = False
elif strVal[0] == '"' and strVal[-1] == '"': # Double Quoted String
returnVal = strVal[1:-1] # remove quotes
elif strVal[0] == "'" and strVal[-1] == "'": # Single Quoted String
returnVal = strVal[1:-1] # remove quotes
elif strVal[0] == "[" and strVal[-1] == "]": # List
listElements = strVal[1:-1].split(",") # remove [] and split by commas
returnVal = [self.parseInputToPrimitive(element) for element in listElements]
else:
returnVal = cgi.escape(str(strVal))
return returnVal
def getOutput(self):
    '''
    Generates the output of the processor.  Uses the attributes
    outputTemplate and outputArgList from the agenda to determine which
    format the output should be in.  If an outputTemplate is unspecified
    or unknown, will return json by default.

    Return is of the style (output, outputType) where outputType is a
    content-type ready for returning to the server:
    "text/plain", "application/json", "text/html", etc.
    '''
    # NOTE(review): this branch is not followed by 'elif', so the error
    # HTML built here is overwritten whenever outputTemplate is empty or
    # unknown (the error list still reaches the client inside the JSON
    # result object in those cases) -- confirm that is intentional.
    if len(self.errorList) > 0:
        output = "<br />".join([":".join(e) for e in self.errorList])
        outputType = 'text/html'
    if self.outputTemplate == "":
        resDict = self.getResultObject()
        # OrderedDict of the sorted items keeps the JSON key order stable.
        resOrderedDict = collections.OrderedDict(sorted(list(resDict.items())))
        output = json.dumps(resOrderedDict)
        output = unicode(output).encode('utf-8')  # Python 2 'unicode'
        outputType = 'text/html; charset=utf-8'
    # TODO: unify these two -- duplicate code
    elif self.outputTemplate not in availableOutputTemplates:
        self.recordError("Unknown output template "+str(self.outputTemplate))
        resDict = self.getResultObject()
        resOrderedDict = collections.OrderedDict(sorted(list(resDict.items())))
        output = json.dumps(resOrderedDict,indent=4)
        output = unicode(output).encode('utf-8')
        outputType = 'text/html; charset=utf-8'
    else:
        argList = self.outputArgList
        for (i,arg) in enumerate(argList):
            parsedArg = self.parseInputToPrimitive(arg)
            argList[i] = parsedArg
        # safe because check for self.outputTemplate in availableOutputTemplates
        ### But let's still TODO: get rid of eval
        (output, outputType) = eval(self.outputTemplate)(*argList)
    return (output, outputType)
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
sampleFormDataSimple = '------WebKitFormBoundarytO99C5T6SZEHKAIb\r\nContent-Disposition: form-data; name="a"\r\n\r\n7\r\n------WebKitFormBoundarytO99C5T6SZEHKAIb\r\nContent-Disposition: form-data; name="b"\r\n\r\n8\r\n------WebKitFormBoundarytO99C5T6SZEHKAIb\r\nContent-Disposition: form-data; name="json"\r\n\r\n{"dataDict":{"c":{"data":7}},\r\n "returnDict":{"a":"int"}\r\n }\r\n------WebKitFormBoundarytO99C5T6SZEHKAIb--\r\n'
sampleJsonStringSimple = r'''
{
"dataDict": {
"myNum":
{"fmt" : "int",
"data" : "23"}
},
"returnDict":{
"myNum" : "int"}
}'''
sampleJsonString = r'''
{
"dataDict": {
"myNum":
{"fmt" : "int",
"data" : "23"}
},
"commandList":[
{"function":"corpus.parse",
"argList":["'bwv7.7'"],
"resultVar":"sc"},
{"method":"transpose",
"caller":"sc",
"argList":["'p5'"],
"resultVar":"sc"},
{"attribute":"flat",
"caller":"sc",
"resultVar":"scFlat"},
{"attribute":"highestOffset",
"caller":"scFlat",
"resultVar":"ho"}
],
"returnDict":{
"myNum" : "int",
"ho" : "int"
}
}'''
class Test(unittest.TestCase):
    # Placeholder so the music21 test runner can instantiate this class
    # without selecting a specific test method.
    def runTest(self):
        pass

    def testAgenda(self):
        # loadJson must merge every key of the parsed JSON into the Agenda
        # dict, preserving nested structure.
        jsonString = r'''
        {
        "dataDict": {
            "myNum":
                {"fmt" : "int",
                 "data" : "23"}
            },
        "commandList":[
            {"function":"corpus.parse",
             "argList":["'bwv7.7'"],
             "resultVar":"sc"},

            {"method":"transpose",
             "caller":"sc",
             "argList":["'p5'"],
             "resultVar":"sc"},

            {"attribute":"flat",
             "caller":"sc",
             "resultVar":"scFlat"},

            {"attribute":"highestOffset",
             "caller":"scFlat",
             "resultVar":"ho"}
            ],
        "returnDict":{
            "myNum" : "int",
            "ho" : "int"
            }
        }
        '''
        ad = Agenda()
        ad.loadJson(jsonString)
        self.assertEqual(ad['dataDict']['myNum']['data'], "23")
# Run this module's Test suite via music21's standard test runner.
if __name__ == '__main__':
    import music21
    music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
| |
import time
import unittest
from hecuba import StorageDict, config
class SimpleDict(StorageDict):
    # NOTE: the docstring below is not free-form documentation -- hecuba
    # parses the @TypeSpec annotation to derive the table schema.  Do not
    # edit it without updating every test that relies on key0/val0.
    '''
    @TypeSpec dict<<key0:int>, val0:int>
    '''
class ComplexDict(StorageDict):
    # NOTE: hecuba parses this @TypeSpec docstring -- composite (str, int)
    # key plus four typed value columns.  Keep it in sync with the tests.
    '''
    @TypeSpec dict<<key0:str, key1:int>, val0:str, val1:int, val2:float, val3:bool>
    '''
class LambdaParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.old = config.execution_name
config.execution_name = "LambdaParserTest".lower()
@classmethod
def tearDownClass(cls):
config.session.execute("DROP KEYSPACE IF EXISTS {}".format(config.execution_name))
config.execution_name = self.current_ksp
# Create a new keyspace per test
def setUp(self):
self.current_ksp = config.execution_name
pass
def tearDown(self):
pass
def test_simple_filter(self):
simple_dict = SimpleDict("test_simple_filter")
res = filter(lambda x: x.key0 == 5, simple_dict.items())
res = [i for i in res]
self.assertEqual(0, len(res))
simple_dict.delete_persistent()
def test_greater(self):
simple_dict = SimpleDict("test_greater")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
res = filter(lambda x: x.key0 > 5, simple_dict.items())
res = [i for i in res]
self.assertEqual(4, len(res))
self.assertTrue((6, 6) in res)
self.assertTrue((7, 7) in res)
self.assertTrue((8, 8) in res)
self.assertTrue((9, 9) in res)
simple_dict.delete_persistent()
def test_column_not_exist(self):
simple_dict = SimpleDict("test_column_not_exist")
def filter_nonexisting_key():
return filter(lambda x: x.key1 == 5, simple_dict.items())
self.assertRaises(Exception, filter_nonexisting_key)
simple_dict.delete_persistent()
def test_not_persistent_object(self):
simple_dict = SimpleDict()
for i in range(0, 10):
simple_dict[i] = i
res = filter(lambda x: x[0] > 5, simple_dict.items())
res = [i for i in res]
self.assertEqual(4, len(res))
self.assertTrue((6, 6) in res)
self.assertTrue((7, 7) in res)
self.assertTrue((8, 8) in res)
self.assertTrue((9, 9) in res)
def test_filter_equal(self):
simple_dict = SimpleDict("test_filter_equal")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
res = filter(lambda x: x.key0 == 5, simple_dict.items())
res = [i for i in res]
self.assertEqual(1, len(res))
self.assertEqual((5, 5), res[0])
simple_dict.delete_persistent()
def test_filter_inside(self):
simple_dict = SimpleDict("test_filter_inside")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
res = filter(lambda x: x.key0 in [1, 3], simple_dict.items())
res = [i for i in res]
self.assertEqual(2, len(res))
self.assertTrue((1, 1) in res)
self.assertTrue((3, 3) in res)
simple_dict.delete_persistent()
def test_different_columns(self):
simple_dict = SimpleDict("test_different_columns")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
res = filter(lambda x: x.key0 in [1, 2, 3, 5, 6, 9] and x.val0 >= 0 and x.val0 <= 5, simple_dict.items())
res = [i for i in res]
self.assertEqual(4, len(res))
self.assertTrue((1, 1) in res)
self.assertTrue((2, 2) in res)
self.assertTrue((3, 3) in res)
self.assertTrue((5, 5) in res)
simple_dict.delete_persistent()
def test_complex_filter(self):
complex_dict = ComplexDict("test_complex_filter")
for i in range(0, 20):
complex_dict[str(i), i] = [str(i), i, float(i), True]
time.sleep(2)
res = filter(lambda x: x.key0 in ["1", "2", "3", "4", "5"] and x.val1 >= 1 and x.val1 <= 5 and x.val2 >= 1.0 and x.val2 <= 4.0 and x.val3 == True, complex_dict.items())
res = [tuple(i) for i in res]
self.assertEqual(4, len(res))
self.assertTrue((("1", 1), ("1", 1, 1.0, True)) in res)
self.assertTrue((("2", 2), ("2", 2, 2.0, True)) in res)
self.assertTrue((("3", 3), ("3", 3, 3.0, True)) in res)
self.assertTrue((("4", 4), ("4", 4, 4.0, True)) in res)
complex_dict.delete_persistent()
def test_bad_type(self):
simple_dict = SimpleDict("test_bad_type")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
def execute_bad_type():
res = filter(lambda x: x.key0 == "1", simple_dict.items())
self.assertRaises(Exception, execute_bad_type)
simple_dict.delete_persistent()
def test_several_operators(self):
simple_dict = SimpleDict("test_several_operators")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
res = filter(lambda x: x.key0 < 5 and x.key0 >= 3, simple_dict.items())
res = [i for i in res]
self.assertEqual(2, len(res))
self.assertTrue((3, 3) in res)
self.assertTrue((4, 4) in res)
simple_dict.delete_persistent()
def test_reversed_operations(self):
simple_dict = SimpleDict("test_reversed_operations")
for i in range(0, 10):
simple_dict[i] = i
time.sleep(1)
res = filter(lambda x: 5 > x.key0 and 3 <= x.key0, simple_dict.items())
res = [i for i in res]
self.assertEqual(2, len(res))
self.assertTrue((3, 3) in res)
self.assertTrue((4, 4) in res)
simple_dict.delete_persistent()
def test_non_hecuba_filter(self):
l = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
res = list(filter(lambda x: x >= 5, l))
self.assertEqual(res, [5, 6, 7, 8, 9])
def test_split_filter(self):
simple_dict = SimpleDict("test_split_filter")
what_should_be = dict()
for i in range(0, 10):
what_should_be[i] = i
simple_dict[i] = i
time.sleep(1)
filtered = []
normal_filtered = list(python_filter(lambda x: x[0] > 3, simple_dict.items()))
i = 0
for partition in simple_dict.split():
# aggregation of filtering on each partition should be equal to a filter on the whole object
res = filter(lambda x: x.key0 > 3, partition.items())
for row in res:
filtered.append(row)
for k, v in partition.items():
# self.assertTrue((tuple(row.key), list(row.value)) in f2)
self.assertEqual(what_should_be[k], v)
i += 1
self.assertEqual(len(what_should_be), i)
self.assertEqual(len(filtered), len(normal_filtered))
for row in filtered:
self.assertTrue(row in normal_filtered)
simple_dict.delete_persistent()
# Standard unittest entry point when the file is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import sys
import uuid
import eventlet
import mock
from oslo.config import cfg
import testtools
from quantum.agent.common import config
from quantum.agent import dhcp_agent
from quantum.agent.dhcp_agent import DhcpAgentWithStateReport
from quantum.agent.linux import dhcp
from quantum.agent.linux import interface
from quantum.common import constants
from quantum.common import exceptions
from quantum.openstack.common import jsonutils
from quantum.tests import base
# This test module lives two directory levels below the tree root; the
# 'etc' directory there holds the test configuration files.
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
# Host name handed to agents under test.
HOSTNAME = 'hostname'
def etcdir(*parts):
    """Return *parts* joined beneath the test ``etc`` directory."""
    return os.path.join(ETCDIR, *parts)
class FakeModel:
    """Minimal stand-in for a quantum API model: an ``id`` plus arbitrary
    attributes supplied as keyword arguments."""

    def __init__(self, id_, **attrs):
        self.id = id_
        for key, value in attrs.items():
            setattr(self, key, value)

    def __str__(self):
        # Render all instance attributes for readable test failures.
        return str(vars(self))
# --- Canned fixtures -------------------------------------------------------
# Two /24 subnets on the same network: DHCP enabled vs. disabled.
fake_subnet1 = FakeModel('bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
                         network_id='12345678-1234-5678-1234567890ab',
                         cidr='172.9.9.0/24', enable_dhcp=True)

fake_subnet2 = FakeModel('dddddddd-dddd-dddd-dddddddddddd',
                         network_id='12345678-1234-5678-1234567890ab',
                         cidr='172.9.9.0/24', enable_dhcp=False)

fake_subnet3 = FakeModel('bbbbbbbb-1111-2222-bbbbbbbbbbbb',
                         network_id='12345678-1234-5678-1234567890ab',
                         cidr='192.168.1.1/24', enable_dhcp=True)

# Metadata-service subnet (169.254.169.252/30 with the well-known gateway).
fake_meta_subnet = FakeModel('bbbbbbbb-1111-2222-bbbbbbbbbbbb',
                             network_id='12345678-1234-5678-1234567890ab',
                             cidr='169.254.169.252/30',
                             gateway_ip='169.254.169.253', enable_dhcp=True)

fake_fixed_ip = FakeModel('', subnet=fake_subnet1, ip_address='172.9.9.9')
fake_meta_fixed_ip = FakeModel('', subnet=fake_meta_subnet,
                               ip_address='169.254.169.254')

# Ports: one with a fixed IP, one without, and a router-interface port
# used by the metadata network.
fake_port1 = FakeModel('12345678-1234-aaaa-1234567890ab',
                       mac_address='aa:bb:cc:dd:ee:ff',
                       network_id='12345678-1234-5678-1234567890ab',
                       fixed_ips=[fake_fixed_ip])

fake_port2 = FakeModel('12345678-1234-aaaa-123456789000',
                       mac_address='aa:bb:cc:dd:ee:99',
                       network_id='12345678-1234-5678-1234567890ab')

fake_meta_port = FakeModel('12345678-1234-aaaa-1234567890ab',
                           mac_address='aa:bb:cc:dd:ee:ff',
                           network_id='12345678-1234-5678-1234567890ab',
                           device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
                           device_id='forzanapoli',
                           fixed_ips=[fake_meta_fixed_ip])

# Whole networks: a normal admin-up network, the metadata network, and an
# admin-down network with no subnets or ports.
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                         tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                         admin_state_up=True,
                         subnets=[fake_subnet1, fake_subnet2],
                         ports=[fake_port1])

fake_meta_network = FakeModel('12345678-1234-5678-1234567890ab',
                              tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                              admin_state_up=True,
                              subnets=[fake_meta_subnet],
                              ports=[fake_meta_port])

fake_down_network = FakeModel('12345678-dddd-dddd-1234567890ab',
                              tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                              admin_state_up=False,
                              subnets=[],
                              ports=[])
class TestDhcpAgent(base.BaseTestCase):
def setUp(self):
    super(TestDhcpAgent, self).setUp()
    # Register every option group the agent reads so cfg.CONF lookups
    # succeed, and force the no-op interface driver.
    cfg.CONF.register_opts(dhcp_agent.DeviceManager.OPTS)
    cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
    cfg.CONF.register_opts(dhcp_agent.DhcpLeaseRelay.OPTS)
    cfg.CONF.set_override('interface_driver',
                          'quantum.agent.linux.interface.NullDriver')
    # Patch importutils.import_class so the agent gets our mock driver
    # class instead of importing a real DHCP driver.
    self.driver_cls_p = mock.patch(
        'quantum.agent.dhcp_agent.importutils.import_class')
    self.driver = mock.Mock(name='driver')
    self.driver.existing_dhcp_networks.return_value = []
    self.driver_cls = self.driver_cls_p.start()
    self.driver_cls.return_value = self.driver
def tearDown(self):
    # Undo the import_class patch and any option overrides from setUp.
    self.driver_cls_p.stop()
    cfg.CONF.reset()
    super(TestDhcpAgent, self).tearDown()
def test_dhcp_agent_manager(self):
    # End-to-end startup check for DhcpAgentWithStateReport: after_start()
    # must run one sync_state/periodic_resync, report state over RPC, and
    # start the lease relay.
    state_rpc_str = 'quantum.agent.rpc.PluginReportStateAPI'
    lease_relay_str = 'quantum.agent.dhcp_agent.DhcpLeaseRelay'
    with mock.patch.object(DhcpAgentWithStateReport,
                           'sync_state',
                           autospec=True) as mock_sync_state:
        with mock.patch.object(DhcpAgentWithStateReport,
                               'periodic_resync',
                               autospec=True) as mock_periodic_resync:
            with mock.patch(state_rpc_str) as state_rpc:
                with mock.patch(lease_relay_str) as mock_lease_relay:
                    with mock.patch.object(sys, 'argv') as sys_argv:
                        # Simulate the command line the agent binary sees.
                        sys_argv.return_value = [
                            'dhcp', '--config-file',
                            etcdir('quantum.conf.test')]
                        cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
                        config.register_agent_state_opts_helper(cfg.CONF)
                        config.register_root_helper(cfg.CONF)
                        cfg.CONF.register_opts(
                            dhcp_agent.DeviceManager.OPTS)
                        cfg.CONF.register_opts(
                            dhcp_agent.DhcpLeaseRelay.OPTS)
                        cfg.CONF.register_opts(dhcp.OPTS)
                        cfg.CONF.register_opts(interface.OPTS)
                        cfg.CONF(project='quantum')
                        agent_mgr = DhcpAgentWithStateReport('testhost')
                        # Yield to the agent's greenthreads before probing.
                        eventlet.greenthread.sleep(1)
                        agent_mgr.after_start()
                        mock_sync_state.assert_called_once_with(agent_mgr)
                        mock_periodic_resync.assert_called_once_with(
                            agent_mgr)
                        state_rpc.assert_has_calls(
                            [mock.call(mock.ANY),
                             mock.call().report_state(mock.ANY, mock.ANY)])
                        mock_lease_relay.assert_has_calls(
                            [mock.call(mock.ANY),
                             mock.call().start()])
def test_dhcp_agent_main_agent_manager(self):
    # main() must hand the agent to the service launcher and wait on it.
    logging_str = 'quantum.agent.common.config.setup_logging'
    launcher_str = 'quantum.openstack.common.service.ServiceLauncher'
    with mock.patch(logging_str):
        with mock.patch.object(sys, 'argv') as sys_argv:
            with mock.patch(launcher_str) as launcher:
                sys_argv.return_value = ['dhcp', '--config-file',
                                         etcdir('quantum.conf.test')]
                dhcp_agent.main()
                launcher.assert_has_calls(
                    [mock.call(), mock.call().launch_service(mock.ANY),
                     mock.call().wait()])
def test_run_completes_single_pass(self):
    """run() performs one pass: sync_state and periodic_resync each fire
    once and the lease relay is started."""
    with mock.patch('quantum.agent.dhcp_agent.DeviceManager'):
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        attrs_to_mock = dict(
            [(a, mock.DEFAULT) for a in
             ['sync_state', 'lease_relay', 'periodic_resync']])
        with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
            dhcp.run()
            mocks['sync_state'].assert_called_once_with()
            mocks['periodic_resync'].assert_called_once_with()
            # BUG FIX: 'assert_has_mock_calls' is not a Mock method --
            # invoking it on a Mock silently created a child mock, so the
            # assertion never ran.  The real API is assert_has_calls().
            mocks['lease_relay'].assert_has_calls(
                [mock.call.start()])
def test_ns_name(self):
    # Namespace name is 'qdhcp-' plus the network id.
    with mock.patch('quantum.agent.dhcp_agent.DeviceManager') as dev_mgr:
        mock_net = mock.Mock(id='foo')
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        self.assertEqual(dhcp._ns_name(mock_net), 'qdhcp-foo')
def test_ns_name_disabled_namespace(self):
    # With use_namespaces off, no namespace name is generated at all.
    with mock.patch('quantum.agent.dhcp_agent.DeviceManager') as dev_mgr:
        cfg.CONF.set_override('use_namespaces', False)
        mock_net = mock.Mock(id='foo')
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        self.assertIsNone(dhcp._ns_name(mock_net))
def test_call_driver(self):
    """call_driver('foo', network) must instantiate the driver with the
    expected arguments and return a truthy result."""
    network = mock.Mock()
    network.id = '1'
    with mock.patch('quantum.agent.dhcp_agent.DeviceManager') as dev_mgr:
        # CONSISTENCY FIX: every sibling test constructs the agent with
        # HOSTNAME; passing cfg.CONF as the host here looked like a
        # leftover from an older constructor signature.
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        self.assertTrue(dhcp.call_driver('foo', network))
        self.assertTrue(dev_mgr.called)
        self.driver.assert_called_once_with(cfg.CONF,
                                            mock.ANY,
                                            'sudo',
                                            mock.ANY,
                                            'qdhcp-1',
                                            mock.ANY)
def test_call_driver_failure(self):
    # When the driver action raises, call_driver returns None, logs one
    # exception, and flags the agent for a resync.
    network = mock.Mock()
    network.id = '1'
    self.driver.return_value.foo.side_effect = Exception
    with mock.patch('quantum.agent.dhcp_agent.DeviceManager') as dev_mgr:
        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            self.assertIsNone(dhcp.call_driver('foo', network))
            self.assertTrue(dev_mgr.called)
            self.driver.assert_called_once_with(cfg.CONF,
                                                mock.ANY,
                                                'sudo',
                                                mock.ANY,
                                                'qdhcp-1',
                                                mock.ANY)
            self.assertEqual(log.call_count, 1)
            self.assertTrue(dhcp.needs_resync)
def test_update_lease(self):
    # update_lease forwards straight to the plugin RPC API.
    with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
        dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        dhcp.update_lease('net_id', '192.168.1.1', 120)
        plug.assert_has_calls(
            [mock.call().update_lease_expiration(
                'net_id', '192.168.1.1', 120)])
def test_update_lease_failure(self):
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
plug.return_value.update_lease_expiration.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.update_lease('net_id', '192.168.1.1', 120)
plug.assert_has_calls(
[mock.call().update_lease_expiration(
'net_id', '192.168.1.1', 120)])
self.assertTrue(log.called)
self.assertTrue(dhcp.needs_resync)
def _test_sync_state_helper(self, known_networks, active_networks):
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks.return_value = active_networks
plug.return_value = mock_plugin
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['refresh_dhcp_helper', 'disable_dhcp_helper', 'cache']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
mocks['cache'].get_network_ids.return_value = known_networks
dhcp.sync_state()
exp_refresh = [
mock.call(net_id) for net_id in active_networks]
diff = set(known_networks) - set(active_networks)
exp_disable = [mock.call(net_id) for net_id in diff]
mocks['cache'].assert_has_calls([mock.call.get_network_ids()])
mocks['refresh_dhcp_helper'].assert_has_called(exp_refresh)
mocks['disable_dhcp_helper'].assert_has_called(exp_disable)
    def test_sync_state_initial(self):
        # Nothing cached yet; 'a' is active -> expect a refresh of 'a'.
        self._test_sync_state_helper([], ['a'])
    def test_sync_state_same(self):
        # Cached and active sets agree -> refresh only, nothing disabled.
        self._test_sync_state_helper(['a'], ['a'])
    def test_sync_state_disabled_net(self):
        # 'b' is cached but no longer active -> expect 'b' to be disabled.
        self._test_sync_state_helper(['b'], ['a'])
def test_sync_state_plugin_error(self):
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks.side_effect = Exception
plug.return_value = mock_plugin
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.sync_state()
self.assertTrue(log.called)
self.assertTrue(dhcp.needs_resync)
def test_periodic_resync(self):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn:
dhcp.periodic_resync()
spawn.assert_called_once_with(dhcp._periodic_resync_helper)
    def test_periodoc_resync_helper(self):
        # NOTE(review): "periodoc" is a typo for "periodic" in the method
        # name; left as-is because renaming would change the test id that
        # runners select on.
        with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep:
            dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
            dhcp.needs_resync = True
            with mock.patch.object(dhcp, 'sync_state') as sync_state:
                # Raising from sync_state terminates the otherwise
                # infinite helper loop after one iteration.
                sync_state.side_effect = RuntimeError
                with testtools.ExpectedException(RuntimeError):
                    dhcp._periodic_resync_helper()
                sync_state.assert_called_once_with()
                sleep.assert_called_once_with(dhcp.conf.resync_interval)
                self.assertFalse(dhcp.needs_resync)
def test_populate_cache_on_start_without_active_networks_support(self):
# emul dhcp driver that doesn't support retrieving of active networks
self.driver.existing_dhcp_networks.side_effect = NotImplementedError
with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
cfg.CONF.root_helper
)
self.assertFalse(dhcp.cache.get_network_ids())
self.assertTrue(log.called)
def test_populate_cache_on_start(self):
networks = ['aaa', 'bbb']
self.driver.existing_dhcp_networks.return_value = networks
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
cfg.CONF.root_helper
)
self.assertEquals(set(networks), set(dhcp.cache.get_network_ids()))
class TestLogArgs(base.BaseTestCase):
    """Tests for config.get_log_args() command-line flag generation."""

    def _make_conf(self, **opts):
        # Wrap the option dict for attribute-style access.
        return dhcp_agent.DictModel(opts)

    def test_log_args_without_log_dir_and_file(self):
        conf = self._make_conf(debug=True, verbose=False,
                               log_dir=None, log_file=None)
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(['--debug'], args)

    def test_log_args_without_log_file(self):
        conf = self._make_conf(debug=True, verbose=True,
                               log_dir='/etc/tests', log_file=None)
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(['--debug', '--verbose',
                          '--log-file=log_file_name',
                          '--log-dir=/etc/tests'], args)

    def test_log_args_with_log_dir_and_file(self):
        conf = self._make_conf(debug=True, verbose=False,
                               log_dir='/etc/tests', log_file='tests/filelog')
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(['--debug',
                          '--log-file=log_file_name',
                          '--log-dir=/etc/tests/tests'], args)

    def test_log_args_without_log_dir(self):
        conf = self._make_conf(debug=True, verbose=False,
                               log_file='tests/filelog', log_dir=None)
        args = config.get_log_args(conf, 'log_file_name')
        self.assertEqual(['--debug',
                          '--log-file=log_file_name',
                          '--log-dir=tests'], args)
class TestDhcpAgentEventHandler(base.BaseTestCase):
    """Tests for DhcpAgent's rpc notification handlers.

    The plugin proxy, network cache, driver dispatch and external-process
    manager are mocked in setUp(), so each test checks only the agent's
    orchestration logic.
    """

    def setUp(self):
        super(TestDhcpAgentEventHandler, self).setUp()
        cfg.CONF.register_opts(dhcp_agent.DeviceManager.OPTS)
        cfg.CONF.register_opts(dhcp_agent.DhcpLeaseRelay.OPTS)
        cfg.CONF.register_opts(dhcp.OPTS)
        cfg.CONF.set_override('interface_driver',
                              'quantum.agent.linux.interface.NullDriver')
        config.register_root_helper(cfg.CONF)
        cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)

        self.plugin_p = mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi')
        plugin_cls = self.plugin_p.start()
        self.plugin = mock.Mock()
        plugin_cls.return_value = self.plugin

        self.cache_p = mock.patch('quantum.agent.dhcp_agent.NetworkCache')
        cache_cls = self.cache_p.start()
        self.cache = mock.Mock()
        cache_cls.return_value = self.cache

        self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
        self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver')
        self.call_driver = self.call_driver_p.start()
        self.external_process_p = mock.patch(
            'quantum.agent.linux.external_process.ProcessManager'
        )
        self.external_process = self.external_process_p.start()

    def tearDown(self):
        # Stop patches in reverse order of creation.
        self.external_process_p.stop()
        self.call_driver_p.stop()
        self.cache_p.stop()
        self.plugin_p.stop()
        cfg.CONF.reset()
        super(TestDhcpAgentEventHandler, self).tearDown()

    def _enable_dhcp_helper(self, isolated_metadata=False):
        """Drive enable_dhcp_helper() and verify plugin/driver/cache calls."""
        if isolated_metadata:
            cfg.CONF.set_override('enable_isolated_metadata', True)
        self.plugin.get_network_info.return_value = fake_network
        self.dhcp.enable_dhcp_helper(fake_network.id)
        self.plugin.assert_has_calls(
            [mock.call.get_network_info(fake_network.id)])
        self.call_driver.assert_called_once_with('enable', fake_network)
        self.cache.assert_has_calls([mock.call.put(fake_network)])
        if isolated_metadata:
            self.external_process.assert_has_calls([
                mock.call(
                    cfg.CONF,
                    '12345678-1234-5678-1234567890ab',
                    'sudo',
                    'qdhcp-12345678-1234-5678-1234567890ab'),
                mock.call().enable(mock.ANY)
            ])
        else:
            self.assertFalse(self.external_process.call_count)

    def test_enable_dhcp_helper_enable_isolated_metadata(self):
        self._enable_dhcp_helper(isolated_metadata=True)

    def test_enable_dhcp_helper(self):
        self._enable_dhcp_helper()

    def test_enable_dhcp_helper_down_network(self):
        self.plugin.get_network_info.return_value = fake_down_network
        self.dhcp.enable_dhcp_helper(fake_down_network.id)
        self.plugin.assert_has_calls(
            [mock.call.get_network_info(fake_down_network.id)])
        # An admin-down network must not start dhcp or touch the cache.
        self.assertFalse(self.call_driver.called)
        self.assertFalse(self.cache.called)
        self.assertFalse(self.external_process.called)

    def test_enable_dhcp_helper_exception_during_rpc(self):
        self.plugin.get_network_info.side_effect = Exception
        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
            self.dhcp.enable_dhcp_helper(fake_network.id)
            self.plugin.assert_has_calls(
                [mock.call.get_network_info(fake_network.id)])
            self.assertFalse(self.call_driver.called)
            self.assertTrue(log.called)
            self.assertTrue(self.dhcp.needs_resync)
            self.assertFalse(self.cache.called)
            self.assertFalse(self.external_process.called)

    def test_enable_dhcp_helper_driver_failure(self):
        self.plugin.get_network_info.return_value = fake_network
        self.call_driver.return_value = False
        self.dhcp.enable_dhcp_helper(fake_network.id)
        self.plugin.assert_has_calls(
            [mock.call.get_network_info(fake_network.id)])
        self.call_driver.assert_called_once_with('enable', fake_network)
        # On driver failure the network is not cached.
        self.assertFalse(self.cache.called)
        self.assertFalse(self.external_process.called)

    def _disable_dhcp_helper_known_network(self, isolated_metadata=False):
        """Drive disable_dhcp_helper() for a cached network."""
        if isolated_metadata:
            cfg.CONF.set_override('enable_isolated_metadata', True)
        self.cache.get_network_by_id.return_value = fake_network
        self.dhcp.disable_dhcp_helper(fake_network.id)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_network.id)])
        self.call_driver.assert_called_once_with('disable', fake_network)
        if isolated_metadata:
            self.external_process.assert_has_calls([
                mock.call(
                    cfg.CONF,
                    '12345678-1234-5678-1234567890ab',
                    'sudo',
                    'qdhcp-12345678-1234-5678-1234567890ab'),
                mock.call().disable()
            ])
        else:
            self.assertFalse(self.external_process.call_count)

    def test_disable_dhcp_helper_known_network_isolated_metadata(self):
        self._disable_dhcp_helper_known_network(isolated_metadata=True)

    def test_disable_dhcp_helper_known_network(self):
        self._disable_dhcp_helper_known_network()

    def test_disable_dhcp_helper_unknown_network(self):
        self.cache.get_network_by_id.return_value = None
        self.dhcp.disable_dhcp_helper('abcdef')
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id('abcdef')])
        self.assertEqual(0, self.call_driver.call_count)
        self.assertFalse(self.external_process.called)

    def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False):
        """Drive disable_dhcp_helper() when the driver call fails."""
        if isolated_metadata:
            cfg.CONF.set_override('enable_isolated_metadata', True)
        self.cache.get_network_by_id.return_value = fake_network
        self.call_driver.return_value = False
        self.dhcp.disable_dhcp_helper(fake_network.id)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_network.id)])
        self.call_driver.assert_called_once_with('disable', fake_network)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_network.id)])
        if isolated_metadata:
            self.external_process.assert_has_calls([
                mock.call(
                    cfg.CONF,
                    '12345678-1234-5678-1234567890ab',
                    'sudo',
                    'qdhcp-12345678-1234-5678-1234567890ab'),
                mock.call().disable()
            ])
        else:
            self.assertFalse(self.external_process.call_count)

    def test_disable_dhcp_helper_driver_failure_isolated_metadata(self):
        self._disable_dhcp_helper_driver_failure(isolated_metadata=True)

    def test_disable_dhcp_helper_driver_failure(self):
        self._disable_dhcp_helper_driver_failure()

    def test_enable_isolated_metadata_proxy(self):
        class_path = 'quantum.agent.linux.external_process.ProcessManager'
        with mock.patch(class_path) as ext_process:
            self.dhcp.enable_isolated_metadata_proxy(fake_network)
            ext_process.assert_has_calls([
                mock.call(
                    cfg.CONF,
                    '12345678-1234-5678-1234567890ab',
                    'sudo',
                    'qdhcp-12345678-1234-5678-1234567890ab'),
                mock.call().enable(mock.ANY)
            ])

    def test_disable_isolated_metadata_proxy(self):
        class_path = 'quantum.agent.linux.external_process.ProcessManager'
        with mock.patch(class_path) as ext_process:
            self.dhcp.disable_isolated_metadata_proxy(fake_network)
            ext_process.assert_has_calls([
                mock.call(
                    cfg.CONF,
                    '12345678-1234-5678-1234567890ab',
                    'sudo',
                    'qdhcp-12345678-1234-5678-1234567890ab'),
                mock.call().disable()
            ])

    def test_enable_isolated_metadata_proxy_with_metadata_network(self):
        cfg.CONF.set_override('enable_metadata_network', True)
        cfg.CONF.set_override('debug', True)
        cfg.CONF.set_override('log_file', 'test.log')
        class_path = 'quantum.agent.linux.ip_lib.IPWrapper'
        self.external_process_p.stop()
        # Ensure the mock is restored if this test fails.
        try:
            with mock.patch(class_path) as ip_wrapper:
                self.dhcp.enable_isolated_metadata_proxy(fake_meta_network)
                ip_wrapper.assert_has_calls([mock.call(
                    'sudo',
                    'qdhcp-12345678-1234-5678-1234567890ab'),
                    mock.call().netns.execute([
                        'quantum-ns-metadata-proxy',
                        mock.ANY,
                        '--router_id=forzanapoli',
                        mock.ANY,
                        mock.ANY,
                        '--debug',
                        ('--log-file=quantum-ns-metadata-proxy%s.log' %
                         fake_meta_network.id)])
                ])
        finally:
            self.external_process_p.start()

    def test_network_create_end(self):
        payload = dict(network=dict(id=fake_network.id))

        with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
            self.dhcp.network_create_end(None, payload)
            # BUG FIX: was enable.assertCalledOnceWith(...), which is not a
            # Mock assertion method and always passed silently.
            enable.assert_called_once_with(fake_network.id)

    def test_network_update_end_admin_state_up(self):
        payload = dict(network=dict(id=fake_network.id, admin_state_up=True))
        with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
            self.dhcp.network_update_end(None, payload)
            # BUG FIX: assertCalledOnceWith is a silent no-op on Mock.
            enable.assert_called_once_with(fake_network.id)

    def test_network_update_end_admin_state_down(self):
        payload = dict(network=dict(id=fake_network.id, admin_state_up=False))
        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
            self.dhcp.network_update_end(None, payload)
            # BUG FIX: assertCalledOnceWith is a silent no-op on Mock.
            disable.assert_called_once_with(fake_network.id)

    def test_network_delete_end(self):
        payload = dict(network_id=fake_network.id)

        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
            self.dhcp.network_delete_end(None, payload)
            # BUG FIX: assertCalledOnceWith is a silent no-op on Mock.
            disable.assert_called_once_with(fake_network.id)

    def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self):
        network = FakeModel('net-id',
                            tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                            admin_state_up=True,
                            subnets=[],
                            ports=[])

        self.cache.get_network_by_id.return_value = network
        self.plugin.get_network_info.return_value = network
        with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
            self.dhcp.refresh_dhcp_helper(network.id)
            # BUG FIX: was disable.called_once_with_args(...), a no-op on
            # Mock; assert the disable call for real.
            disable.assert_called_once_with(network.id)
            self.assertFalse(self.cache.called)
            self.assertFalse(self.call_driver.called)
            self.cache.assert_has_calls(
                [mock.call.get_network_by_id('net-id')])

    def test_refresh_dhcp_helper_exception_during_rpc(self):
        network = FakeModel('net-id',
                            tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                            admin_state_up=True,
                            subnets=[],
                            ports=[])
        self.cache.get_network_by_id.return_value = network
        self.plugin.get_network_info.side_effect = Exception
        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
            self.dhcp.refresh_dhcp_helper(network.id)
            self.assertFalse(self.call_driver.called)
            self.cache.assert_has_calls(
                [mock.call.get_network_by_id('net-id')])
            self.assertTrue(log.called)
            self.assertTrue(self.dhcp.needs_resync)

    def test_subnet_update_end(self):
        payload = dict(subnet=dict(network_id=fake_network.id))
        self.cache.get_network_by_id.return_value = fake_network
        self.plugin.get_network_info.return_value = fake_network

        self.dhcp.subnet_update_end(None, payload)

        self.cache.assert_has_calls([mock.call.put(fake_network)])
        self.call_driver.assert_called_once_with('reload_allocations',
                                                 fake_network)

    def test_subnet_update_end_restart(self):
        new_state = FakeModel(fake_network.id,
                              tenant_id=fake_network.tenant_id,
                              admin_state_up=True,
                              subnets=[fake_subnet1, fake_subnet3],
                              ports=[fake_port1])

        payload = dict(subnet=dict(network_id=fake_network.id))
        self.cache.get_network_by_id.return_value = fake_network
        self.plugin.get_network_info.return_value = new_state

        self.dhcp.subnet_update_end(None, payload)

        self.cache.assert_has_calls([mock.call.put(new_state)])
        self.call_driver.assert_called_once_with('restart',
                                                 new_state)

    def test_subnet_update_end_delete_payload(self):
        prev_state = FakeModel(fake_network.id,
                               tenant_id=fake_network.tenant_id,
                               admin_state_up=True,
                               subnets=[fake_subnet1, fake_subnet3],
                               ports=[fake_port1])

        payload = dict(subnet_id=fake_subnet1.id)
        self.cache.get_network_by_subnet_id.return_value = prev_state
        self.cache.get_network_by_id.return_value = prev_state
        self.plugin.get_network_info.return_value = fake_network

        self.dhcp.subnet_delete_end(None, payload)

        self.cache.assert_has_calls([
            mock.call.get_network_by_subnet_id(
                'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
            mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
            mock.call.put(fake_network)])
        self.call_driver.assert_called_once_with('restart',
                                                 fake_network)

    def test_port_update_end(self):
        payload = dict(port=vars(fake_port2))
        self.cache.get_network_by_id.return_value = fake_network
        self.dhcp.port_update_end(None, payload)
        self.cache.assert_has_calls(
            [mock.call.get_network_by_id(fake_port2.network_id),
             mock.call.put_port(mock.ANY)])
        self.call_driver.assert_called_once_with('reload_allocations',
                                                 fake_network)

    def test_port_delete_end(self):
        payload = dict(port_id=fake_port2.id)
        self.cache.get_network_by_id.return_value = fake_network
        self.cache.get_port_by_id.return_value = fake_port2

        self.dhcp.port_delete_end(None, payload)

        self.cache.assert_has_calls(
            [mock.call.get_port_by_id(fake_port2.id),
             mock.call.get_network_by_id(fake_network.id),
             mock.call.remove_port(fake_port2)])
        self.call_driver.assert_called_once_with('reload_allocations',
                                                 fake_network)

    def test_port_delete_end_unknown_port(self):
        payload = dict(port_id='unknown')
        self.cache.get_port_by_id.return_value = None

        self.dhcp.port_delete_end(None, payload)

        self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')])
        self.assertEqual(self.call_driver.call_count, 0)
class TestDhcpPluginApiProxy(base.BaseTestCase):
    """Tests for the agent-side rpc proxy to the dhcp plugin.

    The proxy's 'call'/'make_msg' methods are mocked, so each test checks
    only the rpc message construction, not the transport.
    """
    def setUp(self):
        super(TestDhcpPluginApiProxy, self).setUp()
        self.proxy = dhcp_agent.DhcpPluginApi('foo', {})
        self.proxy.host = 'foo'
        self.call_p = mock.patch.object(self.proxy, 'call')
        self.call = self.call_p.start()
        self.make_msg_p = mock.patch.object(self.proxy, 'make_msg')
        self.make_msg = self.make_msg_p.start()
    def tearDown(self):
        # Stop patches in reverse order of creation.
        self.make_msg_p.stop()
        self.call_p.stop()
        super(TestDhcpPluginApiProxy, self).tearDown()
    def test_get_active_networks(self):
        self.proxy.get_active_networks()
        self.assertTrue(self.call.called)
        self.make_msg.assert_called_once_with('get_active_networks',
                                              host='foo')
    def test_get_network_info(self):
        # The raw dict returned over rpc is wrapped for attribute access.
        self.call.return_value = dict(a=1)
        retval = self.proxy.get_network_info('netid')
        self.assertEqual(retval.a, 1)
        self.assertTrue(self.call.called)
        self.make_msg.assert_called_once_with('get_network_info',
                                              network_id='netid',
                                              host='foo')
    def test_get_dhcp_port(self):
        self.call.return_value = dict(a=1)
        retval = self.proxy.get_dhcp_port('netid', 'devid')
        self.assertEqual(retval.a, 1)
        self.assertTrue(self.call.called)
        self.make_msg.assert_called_once_with('get_dhcp_port',
                                              network_id='netid',
                                              device_id='devid',
                                              host='foo')
    def test_release_dhcp_port(self):
        self.proxy.release_dhcp_port('netid', 'devid')
        self.assertTrue(self.call.called)
        self.make_msg.assert_called_once_with('release_dhcp_port',
                                              network_id='netid',
                                              device_id='devid',
                                              host='foo')
    def test_release_port_fixed_ip(self):
        self.proxy.release_port_fixed_ip('netid', 'devid', 'subid')
        self.assertTrue(self.call.called)
        self.make_msg.assert_called_once_with('release_port_fixed_ip',
                                              network_id='netid',
                                              subnet_id='subid',
                                              device_id='devid',
                                              host='foo')
    def test_update_lease_expiration(self):
        # Lease updates use 'cast' (fire-and-forget), not 'call'.
        with mock.patch.object(self.proxy, 'cast') as mock_cast:
            self.proxy.update_lease_expiration('netid', 'ipaddr', 1)
            self.assertTrue(mock_cast.called)
            self.make_msg.assert_called_once_with('update_lease_expiration',
                                                  network_id='netid',
                                                  ip_address='ipaddr',
                                                  lease_remaining=1,
                                                  host='foo')
class TestNetworkCache(base.BaseTestCase):
    """Tests for NetworkCache's network/subnet/port lookup tables."""
    def test_put_network(self):
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        # put() indexes the network itself plus its subnets and ports.
        self.assertEqual(nc.cache,
                         {fake_network.id: fake_network})
        self.assertEqual(nc.subnet_lookup,
                         {fake_subnet1.id: fake_network.id,
                          fake_subnet2.id: fake_network.id})
        self.assertEqual(nc.port_lookup,
                         {fake_port1.id: fake_network.id})
    def test_put_network_existing(self):
        prev_network_info = mock.Mock()
        nc = dhcp_agent.NetworkCache()
        with mock.patch.object(nc, 'remove') as remove:
            nc.cache[fake_network.id] = prev_network_info
            nc.put(fake_network)
        # Re-putting a cached network first evicts the stale entry.
        remove.assert_called_once_with(prev_network_info)
        self.assertEqual(nc.cache,
                         {fake_network.id: fake_network})
        self.assertEqual(nc.subnet_lookup,
                         {fake_subnet1.id: fake_network.id,
                          fake_subnet2.id: fake_network.id})
        self.assertEqual(nc.port_lookup,
                         {fake_port1.id: fake_network.id})
    def test_remove_network(self):
        nc = dhcp_agent.NetworkCache()
        nc.cache = {fake_network.id: fake_network}
        nc.subnet_lookup = {fake_subnet1.id: fake_network.id,
                            fake_subnet2.id: fake_network.id}
        nc.port_lookup = {fake_port1.id: fake_network.id}
        nc.remove(fake_network)
        # remove() clears every index that referenced the network.
        self.assertEqual(len(nc.cache), 0)
        self.assertEqual(len(nc.subnet_lookup), 0)
        self.assertEqual(len(nc.port_lookup), 0)
    def test_get_network_by_id(self):
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network)
    def test_get_network_ids(self):
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        self.assertEqual(nc.get_network_ids(), [fake_network.id])
    def test_get_network_by_subnet_id(self):
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id),
                         fake_network)
    def test_get_network_by_port_id(self):
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        self.assertEqual(nc.get_network_by_port_id(fake_port1.id),
                         fake_network)
    def test_put_port(self):
        # NOTE(review): this local deliberately shadows the module-level
        # fake_network so the port-list mutation below stays test-local.
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                 subnets=[fake_subnet1],
                                 ports=[fake_port1])
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        nc.put_port(fake_port2)
        self.assertEqual(len(nc.port_lookup), 2)
        self.assertIn(fake_port2, fake_network.ports)
    def test_put_port_existing(self):
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                 subnets=[fake_subnet1],
                                 ports=[fake_port1, fake_port2])
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        nc.put_port(fake_port2)
        # Re-adding a known port must not create a duplicate lookup entry.
        self.assertEqual(len(nc.port_lookup), 2)
        self.assertIn(fake_port2, fake_network.ports)
    def test_remove_port_existing(self):
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                 subnets=[fake_subnet1],
                                 ports=[fake_port1, fake_port2])
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        nc.remove_port(fake_port2)
        self.assertEqual(len(nc.port_lookup), 1)
        self.assertNotIn(fake_port2, fake_network.ports)
    def test_get_port_by_id(self):
        nc = dhcp_agent.NetworkCache()
        nc.put(fake_network)
        self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1)
class TestDeviceManager(base.BaseTestCase):
    """Tests for DeviceManager, which plugs/unplugs the dhcp tap device."""

    def setUp(self):
        super(TestDeviceManager, self).setUp()
        cfg.CONF.register_opts(dhcp_agent.DeviceManager.OPTS)
        cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
        cfg.CONF.set_override('interface_driver',
                              'quantum.agent.linux.interface.NullDriver')
        config.register_root_helper(cfg.CONF)
        cfg.CONF.set_override('use_namespaces', True)
        cfg.CONF.set_override('enable_isolated_metadata', True)

        self.device_exists_p = mock.patch(
            'quantum.agent.linux.ip_lib.device_exists')
        self.device_exists = self.device_exists_p.start()

        self.dvr_cls_p = mock.patch('quantum.agent.linux.interface.NullDriver')
        self.iproute_cls_p = mock.patch('quantum.agent.linux.'
                                        'ip_lib.IpRouteCommand')
        driver_cls = self.dvr_cls_p.start()
        iproute_cls = self.iproute_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        self.mock_iproute = mock.MagicMock()
        driver_cls.return_value = self.mock_driver
        iproute_cls.return_value = self.mock_iproute

    def tearDown(self):
        self.dvr_cls_p.stop()
        self.device_exists_p.stop()
        self.iproute_cls_p.stop()
        cfg.CONF.reset()
        super(TestDeviceManager, self).tearDown()

    def _test_setup_helper(self, device_exists, reuse_existing=False,
                           net=None, port=None):
        """Drive DeviceManager.setup() and verify the driver interactions."""
        net = net or fake_network
        port = port or fake_port1
        plugin = mock.Mock()
        plugin.get_dhcp_port.return_value = port or fake_port1
        self.device_exists.return_value = device_exists
        self.mock_driver.get_device_name.return_value = 'tap12345678-12'

        dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
        interface_name = dh.setup(net, reuse_existing)

        self.assertEqual(interface_name, 'tap12345678-12')

        plugin.assert_has_calls([
            mock.call.get_dhcp_port(net.id, mock.ANY)])

        namespace = dhcp_agent.NS_PREFIX + net.id

        expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
        expected = [mock.call.init_l3('tap12345678-12',
                                      expected_ips,
                                      namespace=namespace)]

        if not reuse_existing:
            # A fresh device must be plugged before its L3 setup.
            expected.insert(0,
                            mock.call.plug(net.id,
                                           port.id,
                                           'tap12345678-12',
                                           'aa:bb:cc:dd:ee:ff',
                                           namespace=namespace))
        self.mock_driver.assert_has_calls(expected)

    def test_setup(self):
        self._test_setup_helper(False)

    def test_setup_device_exists(self):
        with testtools.ExpectedException(exceptions.PreexistingDeviceFailure):
            self._test_setup_helper(True)

    def test_setup_device_exists_reuse(self):
        self._test_setup_helper(True, True)

    def test_destroy(self):
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
        fake_port = FakeModel('12345678-1234-aaaa-1234567890ab',
                              mac_address='aa:bb:cc:dd:ee:ff')

        with mock.patch('quantum.agent.linux.interface.NullDriver') as dvr_cls:
            mock_driver = mock.MagicMock()
            mock_driver.get_device_name.return_value = 'tap12345678-12'
            dvr_cls.return_value = mock_driver

            plugin = mock.Mock()
            plugin.get_dhcp_port.return_value = fake_port

            dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
            dh.destroy(fake_network, 'tap12345678-12')

            dvr_cls.assert_called_once_with(cfg.CONF)
            mock_driver.assert_has_calls(
                [mock.call.unplug('tap12345678-12',
                                  namespace='qdhcp-' + fake_network.id)])
            plugin.assert_has_calls(
                [mock.call.release_dhcp_port(fake_network.id, mock.ANY)])

    def test_get_interface_name(self):
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
        fake_port = FakeModel('12345678-1234-aaaa-1234567890ab',
                              mac_address='aa:bb:cc:dd:ee:ff')

        with mock.patch('quantum.agent.linux.interface.NullDriver') as dvr_cls:
            mock_driver = mock.MagicMock()
            mock_driver.get_device_name.return_value = 'tap12345678-12'
            dvr_cls.return_value = mock_driver

            plugin = mock.Mock()
            plugin.get_dhcp_port.return_value = fake_port

            dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
            dh.get_interface_name(fake_network, fake_port)

            dvr_cls.assert_called_once_with(cfg.CONF)
            mock_driver.assert_has_calls(
                [mock.call.get_device_name(fake_port)])

            # When the port is supplied the plugin is never consulted.
            self.assertEqual(len(plugin.mock_calls), 0)

    def test_get_interface_name_no_port_provided(self):
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
        fake_port = FakeModel('12345678-1234-aaaa-1234567890ab',
                              mac_address='aa:bb:cc:dd:ee:ff')

        with mock.patch('quantum.agent.linux.interface.NullDriver') as dvr_cls:
            mock_driver = mock.MagicMock()
            mock_driver.get_device_name.return_value = 'tap12345678-12'
            dvr_cls.return_value = mock_driver

            plugin = mock.Mock()
            plugin.get_dhcp_port.return_value = fake_port

            dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
            dh.get_interface_name(fake_network)

            dvr_cls.assert_called_once_with(cfg.CONF)
            mock_driver.assert_has_calls(
                [mock.call.get_device_name(fake_port)])
            # Without an explicit port, the dhcp port comes from the plugin.
            plugin.assert_has_calls(
                [mock.call.get_dhcp_port(fake_network.id, mock.ANY)])

    def test_get_device_id(self):
        fake_network = FakeModel('12345678-1234-5678-1234567890ab',
                                 tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
        expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
                    '5678-1234567890ab')

        with mock.patch('socket.gethostbyname') as get_host:
            with mock.patch('uuid.uuid5') as uuid5:
                uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'
                get_host.return_value = 'localhost'

                dh = dhcp_agent.DeviceManager(cfg.CONF, None)
                self.assertEqual(dh.get_device_id(fake_network), expected)
                # BUG FIX: the original used uuid5.called_once_with(...) --
                # not a real Mock assertion, so it silently passed -- and
                # invoked it *before* get_device_id() had run.  Assert for
                # real, after the call under test.
                uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, 'localhost')
class TestDhcpLeaseRelay(base.BaseTestCase):
    """Tests for the unix-socket relay that forwards dhcp lease updates."""

    def setUp(self):
        super(TestDhcpLeaseRelay, self).setUp()
        cfg.CONF.register_opts(dhcp_agent.DhcpLeaseRelay.OPTS)
        self.unlink_p = mock.patch('os.unlink')
        self.unlink = self.unlink_p.start()

    def tearDown(self):
        self.unlink_p.stop()
        super(TestDhcpLeaseRelay, self).tearDown()

    def test_init_relay_socket_path_no_prev_socket(self):
        """unlink() fails but the path does not exist: init proceeds."""
        with mock.patch('os.path.exists') as exists:
            exists.return_value = False
            self.unlink.side_effect = OSError

            relay = dhcp_agent.DhcpLeaseRelay(None)

            self.unlink.assert_called_once_with(
                cfg.CONF.dhcp_lease_relay_socket)
            exists.assert_called_once_with(cfg.CONF.dhcp_lease_relay_socket)

    def test_init_relay_socket_path_prev_socket_exists(self):
        """unlink() succeeds (stale socket removed): exists() not consulted."""
        with mock.patch('os.path.exists') as exists:
            exists.return_value = False

            relay = dhcp_agent.DhcpLeaseRelay(None)

            self.unlink.assert_called_once_with(
                cfg.CONF.dhcp_lease_relay_socket)
            self.assertFalse(exists.called)

    def test_init_relay_socket_path_prev_socket_unlink_failure(self):
        """unlink() fails while the path still exists: OSError propagates."""
        self.unlink.side_effect = OSError
        with mock.patch('os.path.exists') as exists:
            exists.return_value = True
            with testtools.ExpectedException(OSError):
                relay = dhcp_agent.DhcpLeaseRelay(None)
            # BUG FIX: these assertions were inside the ExpectedException
            # block *after* the raising constructor, so they never executed.
            self.unlink.assert_called_once_with(
                cfg.CONF.dhcp_lease_relay_socket)
            exists.assert_called_once_with(
                cfg.CONF.dhcp_lease_relay_socket)

    def test_handler_valid_data(self):
        network_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
        ip_address = '192.168.1.9'
        lease_remaining = 120

        json_rep = jsonutils.dumps(dict(network_id=network_id,
                                        lease_remaining=lease_remaining,
                                        ip_address=ip_address))
        handler = mock.Mock()
        mock_sock = mock.Mock()
        mock_sock.recv.return_value = json_rep

        relay = dhcp_agent.DhcpLeaseRelay(handler)
        relay._handler(mock_sock, mock.Mock())

        mock_sock.assert_has_calls([mock.call.recv(1024), mock.call.close()])
        # BUG FIX: handler.called_once_with(...) is not a Mock assertion
        # method and always passed silently.
        handler.assert_called_once_with(network_id, ip_address,
                                        lease_remaining)

    def test_handler_invalid_data(self):
        network_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
        ip_address = '192.168.x.x'
        lease_remaining = 120

        json_rep = jsonutils.dumps(
            dict(network_id=network_id,
                 lease_remaining=lease_remaining,
                 ip_address=ip_address))

        handler = mock.Mock()
        mock_sock = mock.Mock()
        mock_sock.recv.return_value = json_rep

        relay = dhcp_agent.DhcpLeaseRelay(handler)

        with mock.patch('quantum.openstack.common.'
                        'uuidutils.is_uuid_like') as validate:
            validate.return_value = False

            with mock.patch.object(dhcp_agent.LOG, 'warn') as log:

                relay._handler(mock_sock, mock.Mock())
                mock_sock.assert_has_calls(
                    [mock.call.recv(1024), mock.call.close()])
                # Invalid payloads are dropped with a warning.
                self.assertFalse(handler.called)
                self.assertTrue(log.called)

    def test_handler_other_exception(self):
        network_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'

        handler = mock.Mock()
        mock_sock = mock.Mock()
        mock_sock.recv.side_effect = Exception

        relay = dhcp_agent.DhcpLeaseRelay(handler)

        with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
            relay._handler(mock_sock, mock.Mock())
            mock_sock.assert_has_calls([mock.call.recv(1024)])
            self.assertFalse(handler.called)
            self.assertTrue(log.called)

    def test_start(self):
        with mock.patch.object(dhcp_agent, 'eventlet') as mock_eventlet:
            handler = mock.Mock()
            relay = dhcp_agent.DhcpLeaseRelay(handler)
            relay.start()

            mock_eventlet.assert_has_calls(
                [mock.call.listen(cfg.CONF.dhcp_lease_relay_socket,
                                  family=socket.AF_UNIX),
                 mock.call.spawn(mock_eventlet.serve,
                                 mock.call.listen.return_value,
                                 relay._handler)])
class TestDictModel(base.BaseTestCase):
    """Tests for the DictModel attribute-access wrapper around dicts."""

    def test_basic_dict(self):
        model = dhcp_agent.DictModel({'a': 1, 'b': 2})
        self.assertEqual(model.a, 1)
        self.assertEqual(model.b, 2)

    def test_dict_has_sub_dict(self):
        model = dhcp_agent.DictModel({'a': {'b': 2}})
        # Nested dicts are wrapped recursively.
        self.assertEqual(model.a.b, 2)

    def test_dict_contains_list(self):
        model = dhcp_agent.DictModel({'a': [1, 2]})
        self.assertEqual(model.a, [1, 2])

    def test_dict_contains_list_of_dicts(self):
        model = dhcp_agent.DictModel({'a': [{'b': 2}, {'c': 3}]})
        self.assertEqual(model.a[0].b, 2)
        self.assertEqual(model.a[1].c, 3)
| |
#!/usr/bin/env python
#:coding=utf-8:
#:tabSize=2:indentSize=2:noTabs=true:
#:folding=explicit:collapseFolds=1:
#TODO: Support references
#TODO: Support inline schema
import types, sys, re, copy
class JSONSchemaValidator:
    '''
    Implementation of the json-schema validator that adheres to the
    JSON Schema Proposal 2nd Draft.

    Each "validate_<prop>" method checks a single schema property
    against one field of the data; __validate merges the schema with
    _schemadefault and dispatches to every validate_* method.
    '''

    # Map of schema types to their equivalent in the python types module
    _typesmap = {
        "string": [types.StringType, types.UnicodeType],
        "integer": types.IntType,
        "number": [types.IntType, types.FloatType],
        "boolean": types.BooleanType,
        "object": types.DictType,
        "array": types.ListType,
        "null": types.NoneType,
        "any": None
    }

    # Default schema property values, merged into every schema before
    # validation so each validate_* method receives an explicit value.
    _schemadefault = {
        "id": None,
        "type": None,
        "properties": None,
        "items": None,
        "optional": False,
        "additionalProperties": None,
        "requires": None,
        "identity": None,
        "minimum": None,
        "maximum": None,
        "minItems": None,
        "maxItems": None,
        "pattern": None,
        "maxLength": None,
        "minLength": None,
        "enum": None,
        "options": None,
        "readonly": None,
        "title": None,
        "description": None,
        "format": None,
        "default": None,
        "transient": None,
        "maxDecimal": None,
        "hidden": None,
        "disallow": None,
        "extends": None
    }

    # Map from schema ids to schema objects, populated by validate_id;
    # '$' always refers to the root schema of the current validate() run.
    _refmap = {}

    # When True, schema defaults are written back into the document.
    _interactive_mode = True

    def __init__(self, interactive_mode=True):
        self._interactive_mode = interactive_mode

    def validate_id(self, x, fieldname, schema, ID=None):
        '''
        Validates a schema id and adds it to the schema reference map
        '''
        if ID is not None:
            if ID == "$":
                # '$' is reserved for the root schema (see validate()).
                raise ValueError("Reference id for field '%s' cannot equal '$'" % fieldname)
            self._refmap[ID] = schema
        return x

    def validate_type(self, x, fieldname, schema, fieldtype=None):
        '''
        Validates that the fieldtype specified is correct for the given
        data
        '''
        converted_fieldtype = self._convert_type(fieldtype)
        # We need to know if the field exists or if it's just Null
        fieldexists = True
        try:
            value = x[fieldname]
        except KeyError:
            fieldexists = False
        finally:
            value = x.get(fieldname)
        if converted_fieldtype is not None and fieldexists:
            if type(converted_fieldtype) == types.ListType:
                # A union of types: the value is valid if any member matches.
                datavalid = False
                for eachtype in converted_fieldtype:
                    try:
                        self.validate_type(x, fieldname, eachtype, eachtype)
                        datavalid = True
                        break
                    except ValueError:
                        pass
                if not datavalid:
                    raise ValueError("Value %r for field '%s' is not of type %r" % (value, fieldname, fieldtype))
            elif type(converted_fieldtype) == types.DictType:
                # A nested schema: validate the field against it directly
                # (the old try/raise-e wrapper was a no-op and was removed).
                self.__validate(fieldname, x, converted_fieldtype)
            else:
                if type(value) != converted_fieldtype:
                    raise ValueError("Value %r for field '%s' is not of type %r" % (value, fieldname, fieldtype))
        return x

    def validate_properties(self, x, fieldname, schema, properties=None):
        '''
        Validates properties of a JSON object by processing the object's
        schema recursively
        '''
        if properties is not None and x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                if type(value) == types.DictType:
                    if type(properties) == types.DictType:
                        for eachProp in properties.keys():
                            self.__validate(eachProp, value, properties.get(eachProp))
                    else:
                        raise ValueError("Properties definition of field '%s' is not an object" % fieldname)
        return x

    def validate_items(self, x, fieldname, schema, items=None):
        '''
        Validates that all items in the list for the given field match the
        given schema
        '''
        if items is not None and x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                if type(value) == types.ListType:
                    if type(items) == types.ListType:
                        # Tuple typing: each position has its own schema.
                        if len(items) == len(value):
                            for itemIndex in range(len(items)):
                                try:
                                    # BUG FIX: was self.validate(), which
                                    # resets _refmap (dropping '$' and any
                                    # collected ids) mid-validation.
                                    self._validate(value[itemIndex], items[itemIndex])
                                except ValueError as e:
                                    raise ValueError("Failed to validate field '%s' list schema: %r" % (fieldname, e.message))
                        else:
                            raise ValueError("Length of list %r for field '%s' is not equal to length of schema list" % (value, fieldname))
                    elif type(items) == types.DictType:
                        # Homogeneous list: every item matches one schema.
                        for eachItem in value:
                            try:
                                self._validate(eachItem, items)
                            except ValueError as e:
                                raise ValueError("Failed to validate field '%s' list schema: %r" % (fieldname, e.message))
                    else:
                        raise ValueError("Properties definition of field '%s' is not a list or an object" % fieldname)
        return x

    def validate_optional(self, x, fieldname, schema, optional=False):
        '''
        Validates that the given field is present if optional is false
        '''
        # Make sure the field is present
        if fieldname not in x and not optional:
            raise ValueError("Required field '%s' is missing" % fieldname)
        return x

    def validate_additionalProperties(self, x, fieldname, schema, additionalProperties=None):
        '''
        Validates additional properties of a JSON object that were not
        specifically defined by the properties property
        '''
        if additionalProperties is not None:
            # If additionalProperties is the boolean value True then we accept any
            # additional properties.
            if type(additionalProperties) == types.BooleanType and additionalProperties == True:
                return x
            value = x.get(fieldname)
            if type(additionalProperties) == types.DictType \
               or type(additionalProperties) == types.BooleanType:
                properties = schema.get("properties")
                if properties is None:
                    properties = {}
                for eachProperty in value.keys():
                    if eachProperty not in properties:
                        # If additionalProperties is the boolean value False then we
                        # don't accept any additional properties.
                        if type(additionalProperties) == types.BooleanType and additionalProperties == False:
                            raise ValueError("Additional properties not defined by 'properties' are not allowed in field '%s'" % fieldname)
                        self.__validate(eachProperty, value, additionalProperties)
            else:
                raise ValueError("additionalProperties schema definition for field '%s' is not an object" % fieldname)
        return x

    def validate_requires(self, x, fieldname, schema, requires=None):
        '''Validates that the field named by 'requires' is also present.'''
        if x.get(fieldname) is not None and requires is not None:
            if x.get(requires) is None:
                raise ValueError("Field '%s' is required by field '%s'" % (requires, fieldname))
        return x

    def validate_identity(self, x, fieldname, schema, unique=False):
        # Not implemented by this draft; accepted for schema compatibility.
        return x

    def validate_minimum(self, x, fieldname, schema, minimum=None):
        '''
        Validates that the field is greater than or equal to the minimum
        value (or, for lists, has at least that many elements)
        '''
        if minimum is not None and x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                if type(value) in (types.IntType, types.FloatType) and value < minimum:
                    raise ValueError("Value %r for field '%s' is less than minimum value: %f" % (value, fieldname, minimum))
                elif type(value) == types.ListType and len(value) < minimum:
                    raise ValueError("Value %r for field '%s' has fewer values than the minimum: %f" % (value, fieldname, minimum))
        return x

    def validate_maximum(self, x, fieldname, schema, maximum=None):
        '''
        Validates that the field is less than or equal to the maximum
        value (or, for lists, has at most that many elements)
        '''
        if maximum is not None and x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                if type(value) in (types.IntType, types.FloatType) and value > maximum:
                    raise ValueError("Value %r for field '%s' is greater than maximum value: %f" % (value, fieldname, maximum))
                elif type(value) == types.ListType and len(value) > maximum:
                    raise ValueError("Value %r for field '%s' has more values than the maximum: %f" % (value, fieldname, maximum))
        return x

    def validate_minItems(self, x, fieldname, schema, minitems=None):
        '''
        Validates that the number of items in the given field is equal to or
        more than the minimum amount.
        '''
        if minitems is not None and x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                if type(value) == types.ListType and len(value) < minitems:
                    # BUG FIX: the message interpolated fieldname twice
                    # instead of showing the offending value.
                    raise ValueError("Value %r for field '%s' must have a minimum of %d items" % (value, fieldname, minitems))
        return x

    def validate_maxItems(self, x, fieldname, schema, maxitems=None):
        '''
        Validates that the number of items in the given field is equal to or
        less than the maximum amount.
        '''
        if maxitems is not None and x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                if type(value) == types.ListType and len(value) > maxitems:
                    raise ValueError("Value %r for field '%s' must have a maximum of %d items" % (value, fieldname, maxitems))
        return x

    def validate_pattern(self, x, fieldname, schema, pattern=None):
        '''
        Validates that the given field, if a string, matches the given
        regular expression.
        '''
        value = x.get(fieldname)
        if pattern is not None and \
           value is not None and \
           self._is_string_type(value):
            p = re.compile(pattern)
            if not p.match(value):
                raise ValueError("Value %r for field '%s' does not match regular expression '%s'" % (value, fieldname, pattern))
        return x

    def validate_maxLength(self, x, fieldname, schema, length=None):
        '''
        Validates that the value of the given field is shorter than or equal
        to the specified length if a string
        '''
        value = x.get(fieldname)
        if length is not None and \
           value is not None and \
           self._is_string_type(value) and \
           len(value) > length:
            raise ValueError("Length of value %r for field '%s' must be less than or equal to %f" % (value, fieldname, length))
        return x

    def validate_minLength(self, x, fieldname, schema, length=None):
        '''
        Validates that the value of the given field is longer than or equal
        to the specified length if a string
        '''
        value = x.get(fieldname)
        if length is not None and \
           value is not None and \
           self._is_string_type(value) and \
           len(value) < length:
            raise ValueError("Length of value %r for field '%s' must be more than or equal to %f" % (value, fieldname, length))
        return x

    def validate_enum(self, x, fieldname, schema, options=None):
        '''
        Validates that the value of the field is equal to one of the
        specified option values
        '''
        value = x.get(fieldname)
        if options is not None and value is not None:
            if not type(options) == types.ListType:
                # BUG FIX: the message was passed as a second positional
                # argument (comma instead of %), so it never formatted.
                raise ValueError("Enumeration %r for field '%s' is not a list type" % (options, fieldname))
            if value not in options:
                raise ValueError("Value %r for field '%s' is not in the enumeration: %r" % (value, fieldname, options))
        return x

    def validate_options(self, x, fieldname, schema, options=None):
        # Presentation-only property; nothing to validate.
        return x

    def validate_readonly(self, x, fieldname, schema, readonly=False):
        # Consulted by validate_default; nothing to validate here.
        return x

    def validate_title(self, x, fieldname, schema, title=None):
        '''Validates that the schema title, if given, is a string.'''
        if title is not None and \
           not self._is_string_type(title):
            raise ValueError("The title for field '%s' must be a string" % fieldname)
        return x

    def validate_description(self, x, fieldname, schema, description=None):
        '''Validates that the schema description, if given, is a string.'''
        if description is not None and \
           not self._is_string_type(description):
            raise ValueError("The description for field '%s' must be a string." % fieldname)
        return x

    def validate_format(self, x, fieldname, schema, format=None):
        '''
        Validates that the value of the field matches the predifined format
        specified.
        '''
        # No definitions are currently defined for formats
        return x

    def validate_default(self, x, fieldname, schema, default=None):
        '''
        Adds default data to the original json document if the document is
        not readonly
        '''
        if self._interactive_mode and fieldname not in x and default is not None:
            if not schema.get("readonly"):
                x[fieldname] = default
        return x

    def validate_transient(self, x, fieldname, schema, transient=False):
        # Presentation-only property; nothing to validate.
        return x

    def validate_maxDecimal(self, x, fieldname, schema, maxdecimal=None):
        '''
        Validates that the value of the given field has less than or equal
        to the maximum number of decimal places given
        '''
        value = x.get(fieldname)
        if maxdecimal is not None and value is not None:
            maxdecstring = str(value)
            # BUG FIX: only count decimal places when there actually is a
            # decimal point; find() returning -1 made the entire string
            # count as decimals, rejecting e.g. plain integers.
            if "." in maxdecstring and \
               len(maxdecstring[maxdecstring.find(".")+1:]) > maxdecimal:
                raise ValueError("Value %r for field '%s' must not have more than %d decimal places" % (value, fieldname, maxdecimal))
        return x

    def validate_hidden(self, x, fieldname, schema, hidden=False):
        # Presentation-only property; nothing to validate.
        return x

    def validate_disallow(self, x, fieldname, schema, disallow=None):
        '''
        Validates that the value of the given field does not match the
        disallowed type.
        '''
        if disallow is not None:
            try:
                self.validate_type(x, fieldname, schema, disallow)
            except ValueError:
                # Failing the type check is exactly what we want here.
                return x
            raise ValueError("Value %r of type %s is disallowed for field '%s'" % (x.get(fieldname), disallow, fieldname))
        return x

    def validate_extends(self, x, fieldname, schema, extends=None):
        # Schema inheritance is not supported by this implementation.
        return x

    def _convert_type(self, fieldtype):
        '''Resolves a schema type name (or list/schema thereof) to the
        corresponding python type(s); returns None for "any".'''
        if type(fieldtype) in (types.TypeType, types.DictType):
            return fieldtype
        elif type(fieldtype) == types.ListType:
            converted_fields = []
            for subfieldtype in fieldtype:
                converted_fields.append(self._convert_type(subfieldtype))
            return converted_fields
        elif fieldtype is None:
            return None
        else:
            fieldtype = str(fieldtype)
            if fieldtype in self._typesmap.keys():
                return self._typesmap[fieldtype]
            else:
                raise ValueError("Field type '%s' is not supported." % fieldtype)

    def validate(self, data, schema):
        '''
        Validates a piece of json data against the provided json-schema.
        '''
        #TODO: Validate the schema object here.
        self._refmap = {
            '$': schema
        }
        # Wrap the data in a dictionary
        self._validate(data, schema)

    def _validate(self, data, schema):
        # Wraps bare data so __validate can treat it as a field lookup.
        self.__validate("_data", {"_data": data}, schema)

    def __validate(self, fieldname, data, schema):
        '''Runs every validate_* method for one field against its schema.'''
        if schema is not None:
            if not type(schema) == types.DictType:
                raise ValueError("Schema structure is invalid.")
            # Produce a copy of the schema object since we will make changes to
            # it to process default values. Deep copy is not necessary since we will
            # produce a copy of sub items on the next recursive call.
            new_schema = copy.copy(schema)
            # Initialize defaults
            for schemaprop in self._schemadefault.keys():
                if schemaprop not in new_schema:
                    new_schema[schemaprop] = self._schemadefault[schemaprop]
            for schemaprop in new_schema:
                validatorname = "validate_" + schemaprop
                # BUG FIX: only the getattr lookup is guarded now; the old
                # code also caught AttributeErrors raised *inside* the
                # validator and misreported them as unsupported properties.
                try:
                    validator = getattr(self, validatorname)
                except AttributeError:
                    raise ValueError("Schema property '%s' is not supported" % schemaprop)
                # Pass the original schema object but the value of the property
                # from the copy in order to validate default values.
                validator(data, fieldname, schema, new_schema.get(schemaprop))
        return data

    def _is_string_type(self, value):
        '''True for both byte and unicode strings (python 2).'''
        return type(value) in (types.StringType, types.UnicodeType)
__all__ = [ 'JSONSchemaValidator' ]
| |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""
This module defines common functions and exception class definitions
used all over VisTrails.
"""
from __future__ import division, with_statement
from vistrails.core.utils.enum import enum
from vistrails.core.utils.timemethod import time_method, time_call
from vistrails.core.utils.tracemethod import trace_method, bump_trace, report_stack, \
trace_method_options, trace_method_args
from vistrails.core.utils.color import ColorByName
import copy
from distutils.version import LooseVersion
import errno
import functools
import itertools
import os
import sys
import warnings
import weakref
import unittest
import tempfile
################################################################################
def invert(d):
    """invert(dict) -> dict.

    Returns an inverted dictionary by switching key-value pairs. If
    you use this repeatedly, consider switching the underlying data
    structure to a core.data_structures.bijectivedict.Bidict instead.
    """
    return dict((value, key) for key, value in d.items())
################################################################################
def unimplemented():
    """Raises UnimplementedException."""
    # Call from method stubs whose implementation is still missing.
    raise UnimplementedException()
def abstract():
    """Raises AbstractException."""
    # Call from methods that subclasses are required to override.
    raise AbstractException()
class VistrailsWarning(Warning):
    # Root of the VisTrails warning hierarchy; filter on this category
    # to control every VisTrails-emitted warning at once.
    pass
class VistrailsDeprecation(VistrailsWarning):
    # Warning category emitted by the @deprecated decorator below.
    pass
def deprecated(*args):
    """Decorator that emits a VistrailsDeprecation warning on each call
    of the wrapped function.

    Supports two usages: bare (@deprecated) or with the replacement's
    name (@deprecated("new_func")). new_name is deliberately assigned
    *after* _deprecated is defined -- the inner new_func reads it late
    through the closure.
    """
    new_name = None
    def _deprecated(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            if new_name is not None:
                warnings.warn("Call to deprecated function %s "
                              "replaced by %s" % (
                              func.__name__, new_name),
                              category=VistrailsDeprecation,
                              stacklevel=2)
            else:
                warnings.warn("Call to deprecated function %s" % func.__name__,
                              category=VistrailsDeprecation,
                              stacklevel=2)
            return func(*args, **kwargs)
        return new_func
    if len(args) == 1 and callable(args[0]):
        # Bare usage: the single argument is the decorated function.
        return _deprecated(args[0])
    else:
        # Called with the replacement name; return the real decorator.
        new_name = args[0]
        return _deprecated
################################################################################
class NoMakeConnection(Exception):
    """Raised when a VisConnection cannot produce a live version of
    itself. Internal error -- a user should never see this; please
    report a bug if you do.
    """

    def __init__(self, conn):
        self.conn = conn

    def __str__(self):
        message = "Connection %s has no makeConnection method" % self.conn
        return message
class NoSummon(Exception):
    """Raised when a VisObject cannot create a live version of itself.
    Internal error -- a user should never see this; please report a bug
    if you do.
    """

    def __init__(self, obj):
        self.obj = obj

    def __str__(self):
        message = "Module %s has no summon method" % self.obj
        return message
class UnimplementedException(Exception):
    """Raised when some interface has not been implemented yet.
    Internal error -- a user should never see this; please report a bug
    if you do.
    """

    def __str__(self):
        return "Object is Unimplemented"
class AbstractException(Exception):
    """Raised when an abstract method is called.
    Internal error -- a user should never see this; please report a bug
    if you do.
    """

    def __str__(self):
        return "Abstract Method was called"
class VistrailsInternalError(Exception):
    """Raised when an unexpected internal inconsistency happens.
    This is (clearly) an internal error that should never be seen by a
    user; please report a bug if you see it.
    """

    def __str__(self):
        # Prefix the base Exception message with an identifying banner.
        return "Vistrails Internal Error: %s" % Exception.__str__(self)
class VersionTooLow(Exception):
    """Raised when an outdated version of some required software or
    package is detected.
    """

    def __init__(self, sw, required_version):
        self.sw = sw
        self.required_version = required_version

    def __str__(self):
        pieces = ["Your version of '", self.sw,
                  "' is too low. Please upgrade to ",
                  self.required_version, " or later"]
        return "".join(pieces)
class InvalidModuleClass(Exception):
    """Raised when something is wrong with a class being registered as
    a module within VisTrails.
    """

    def __init__(self, klass):
        self.klass = klass

    def __str__(self):
        template = ("class '%s' cannot be registered in VisTrails. Please"
                    " consult the documentation.")
        return template % self.klass.__name__
class ModuleAlreadyExists(Exception):
    """Raised when trying to add a class whose identifier and name are
    already present in the module registry."""

    def __init__(self, identifier, moduleName):
        self._identifier = identifier
        self._name = moduleName

    def __str__(self):
        return ("'%s, %s' cannot be registered in VisTrails because of another "
                "module with the same identifier and name already exists."
                % (self._identifier, self._name))
class PortAlreadyExists(Exception):
    """Raised when trying to add a PortSpec with the same name and type
    as an existing PortSpec on the same module."""

    def __init__(self, identifier, module_name, port_type, port_name):
        self._identifier = identifier
        self._module_name = module_name
        self._port_type = port_type
        self._port_name = port_name

    def __str__(self):
        return ("Module '%s:%s' already contains an %s port named '%s'"
                % (self._identifier, self._module_name, self._port_type,
                   self._port_name))
class InvalidPipeline(Exception):
    """Raised when a pipeline cannot be instantiated due to missing
    information in the registry, like unloaded packages or missing
    modules.

    exception_set: every exception explaining why the pipeline is
        invalid.
    pipeline: the invalid (potentially incomplete) pipeline itself,
        stored so that pipeline upgrades can be performed later. Since
        Controller.do_version_switch (sensibly) bails before installing
        the invalid pipeline as current, it has to travel through this
        exception together with its version.
    version: version id of the pipeline in the vistrail.
    """

    def __init__(self, exception_set, pipeline=None, version=None):
        self._exception_set = exception_set
        # An invalid pipeline sometimes cannot even be copied; rather
        # than letting the constructor raise, fall back to None.
        try:
            self._pipeline = copy.copy(pipeline)
        except Exception:
            self._pipeline = None
        self._version = version

    def __str__(self):
        detail_lines = []
        for exc in self._exception_set:
            detail_lines.extend(str(exc).splitlines())
        return ("Pipeline has errors. Please see the detailed message "
                "for more information.\n " + '\n '.join(detail_lines))

    def get_exception_set(self):
        return self._exception_set
################################################################################
# Only works for functions with NO kwargs!
# Only works for functions with NO kwargs!
def memo_method(method):
    """memo_method is a method decorator that memoizes results of the
    decorated method, trading off memory for time by caching previous
    results of the calls.

    NOTE: the cache key is the positional-argument tuple only -- 'self'
    is not part of the key, so the cache is shared across all instances
    of the class, and keyword arguments are not supported.

    Fix: removed the unused local 'attrname' left over from an earlier
    implementation.
    """
    memo = {}
    def decorated(self, *args):
        try:
            return memo[args]
        except KeyError:
            result = method(self, *args)
            memo[args] = result
            return result
    warn = "(This is a memoized method: Don't mutate the return value you're given.)"
    # Keep the original docstring visible, with the mutation warning.
    if method.__doc__:
        decorated.__doc__ = method.__doc__ + "\n\n" + warn
    else:
        decorated.__doc__ = warn
    return decorated
##############################################################################
# Profiling, utilities
_profiled_list = []
def profile(func):
    """profile is a method decorator that profiles the calls of a
    given method using cProfile. You need to get the decorated method
    programmatically later to get to the profiler stats. It will be
    available as the attribute 'profiler_object' on the decorated
    result.

    From there, you can simply call save_all_profiles(), and that will
    take the list of all profiled methods and save them to different
    files.

    If you like manual labor, you probably want to do something like this:

    >>> po = ...... .profiler_object
    >>> po.dump_stats('/tmp/some_temporary_file')
    >>> import pstats
    >>> ps = pstats.Stats('/tmp/some_temporary_file')
    >>> ps.sort_stats('time') # or cumtime, or calls, or others - see doc
    >>> ps.print_stats()
    """
    # Notice that on ubuntu you will need
    # sudo apt-get install python-profiler
    try:
        import cProfile as prof
    except ImportError:
        import profile as prof
    profiler = prof.Profile()
    def method(*args, **kwargs):
        return profiler.runcall(func, *args, **kwargs)
    method.profiler_object = profiler
    # Register the wrapper so save_all_profiles() can find it later.
    _profiled_list.append((func.__name__, method))
    return method
def get_profiled_methods():
    """Returns the list of (name, wrapper) pairs registered by @profile."""
    return _profiled_list
def save_profile_to_disk(callable_, filename):
    """Dumps the profiler stats of a @profile-decorated callable to
    the given filename."""
    callable_.profiler_object.dump_stats(filename)
def save_all_profiles():
    """Saves the stats of every @profile-decorated method to a
    '<name>.pyp' file inside the system temporary directory."""
    td = tempfile.gettempdir()
    for (name, method) in get_profiled_methods():
        # BUG FIX: was 'td + name', which misses the path separator and
        # writes e.g. '/tmpfoo.pyp' next to the temp dir instead of
        # '/tmp/foo.pyp' inside it.
        fout = os.path.join(td, name + '.pyp')
        #print fout
        method.profiler_object.dump_stats(fout)
##############################################################################
def debug(func):
    """debug is a method decorator that invokes the python integrated
    debugger in a given method. Use it to step through tricky
    code. Note that pdb is not integrated with emacs or anything like
    that, so you'll need a shell to see what's going on.
    """
    import pdb
    def wrapper(*args, **kwargs):
        # Run the wrapped callable under pdb's control.
        return pdb.runcall(func, *args, **kwargs)
    return wrapper
################################################################################
# Write our own all() and any() if python version < 2.5
if sys.version_info < (2, 5):
    def any(iterable):
        """any(iterable) -> Boolean - Returns true if any element
        is true. This is meant to be the equivalent of python 2.5's any
        when running on python < 2.5"""
        for b in iterable:
            if b:
                return True
        return False
    def all(iterable):
        """all(iterable) -> Boolean - Returns true if no elements are
        False. This is meant to be the equivalent of python 2.5's
        all() when running on python < 2.5"""
        for b in iterable:
            if not b:
                return False
        return True
else:
    # On 2.5+ simply re-export the builtins so callers may import
    # any/all from this module unconditionally.
    import __builtin__
    any = __builtin__.any
    all = __builtin__.all
def iter_index(iterable, item):
    """iter_index(iterable, item) -> int - Iterates through iterator
    until item is found, and returns the index inside the iterator.

    iter_index is analogous to list.index for iterators; returns -1
    when the item is not found (next() raises StopIteration once the
    paired stream is exhausted).
    """
    try:
        # Pair each element with its position, then drop the prefix of
        # non-matching pairs and read the counter of the first match.
        itor = itertools.izip(iterable, itertools.count(0))
        return itertools.dropwhile(lambda (v,c): v != item, itor).next()[1]
    except StopIteration:
        return -1
def eprint(*args):
    """eprint(*args) -> False - Prints the arguments, then returns
    false. Useful inside a lambda expression, for example.

    NOTE(review): it actually returns None (which is falsy), not False.
    """
    # Python 2 print statement: the trailing comma keeps the values on
    # one line; the final bare print emits the newline.
    for v in args:
        print v,
    print
def uniq(l):
    """uniq(l) -> List. Returns a new list consisting of elements that
    test pairwise different for equality. Requires all elements to be
    sortable, and runs in O(n log n) time.

    Fix: renamed the comprehension variable 'next', which shadowed the
    builtin of the same name.
    """
    if len(l) == 0:
        return []
    a = copy.copy(l)
    a.sort()
    # After sorting, duplicates are adjacent: keep the head plus every
    # element that differs from its predecessor.
    l1 = a[:-1]
    l2 = a[1:]
    return [a[0]] + [succ for (pred, succ) in itertools.izip(l1, l2) if pred != succ]
class InstanceObject(object):
    """InstanceObject is a convenience class created to facilitate
    creating of one-off objects with many fields. It simply translates
    the passed kwargs on the constructor to a set of fields with
    the right values."""

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def __str__(self):
        # "(ClassName field: value ...)@ADDR", one field per line,
        # continuation lines aligned under the first field.
        pre = "(%s " % self.__class__.__name__
        items = [('%s: %s' % (key, str(val)))
                 for (key, val)
                 in sorted(self.__dict__.items())]
        separator = '\n' + (' ' * len(pre))
        return pre + separator.join(items) + (')@%X' % id(self))

    def write_source(self, prefix=""):
        # Emits "prefix.field = value" lines, recursing into nested
        # InstanceObjects by extending the prefix.
        result = ""
        for (key, val) in sorted(self.__dict__.items()):
            if isinstance(val, InstanceObject):
                result += val.write_source(prefix + "." + key)
            else:
                result += prefix + "." + str(key) + " = "
                if isinstance(val, basestring):
                    result += "'" + str(val) + "'\n"
                else:
                    result += str(val) + "\n"
        return result
def append_to_dict_of_lists(dict, key, value):
    """Appends /value/ to /dict/[/key/], or creates entry such that
    /dict/[/key/] == [/value/]."""
    dict.setdefault(key, []).append(value)
def version_string_to_list(version):
    """version_string_to_list converts a version string to a list of
    numbers and strings:

    version_string('0.1') -> [0, 1]
    version_string('0.9.9alpha1') -> [0, 9, 9, 'alpha', 1]
    """
    # LooseVersion does the tokenizing; we just expose its parts.
    return LooseVersion(version).version
def versions_increasing(v1, v2):
    """Returns True iff version string v1 is strictly older than v2,
    using LooseVersion ordering."""
    return LooseVersion(v1) < LooseVersion(v2)
##############################################################################
# DummyView & DummyScene
class DummyScene(object):
    """No-op stand-in for a pipeline scene, used by DummyView for
    headless execution."""

    def __init__(self):
        # BUG FIX: this method was misspelled '__init' (name-mangled to
        # _DummyScene__init), so it never ran and instances lacked
        # these attributes entirely.
        self.current_version = -1
        self.current_pipeline = None

    def get_selected_module_ids(self):
        """Nothing is ever selected in the dummy scene."""
        return []

    def flushMoveActions(self, *args, **kwargs): pass
class DummyView(object):
    """View with no GUI: every notification hook is a no-op so that
    pipelines can be executed headlessly."""

    def __init__(self):
        self._scene = DummyScene()

    # Module/execution status callbacks -- all intentionally no-ops.
    def set_module_active(self, *args, **kwargs): pass
    def set_module_computing(self, *args, **kwargs): pass
    def set_module_success(self, *args, **kwargs): pass
    def set_module_suspended(self, *args, **kwargs): pass
    def set_module_error(self, *args, **kwargs): pass
    def set_module_not_executed(self, *args, **kwargs): pass
    def set_module_progress(self, *args, **kwargs): pass
    def set_module_persistent(self, *args, **kwargs): pass
    def set_execution_progress(self, *args, **kwargs): pass
    def flushMoveActions(self, *args, **kwargs): pass

    def scene(self):
        return self._scene
################################################################################
# class for creating weak references to bound methods
# based on recipe http://code.activestate.com/recipes/81253/
# converted to work also in python 2.6.x without using deprecated methods
# not tested in python 2.5.x but it should work
class Ref(object):
    """ Wraps any callable, most importantly a bound method, in a way that
    allows a bound method's object to be GC'ed, while providing the same
    interface as a normal weak reference.

    Calling the Ref returns the (re-bound) callable, or None once the
    underlying instance has been collected.
    """
    def __init__(self, fn):
        try:
            #try getting object, function, and class
            o, f, c = fn.im_self, fn.im_func, fn.im_class
        except AttributeError: #it's not a bound method
            self._obj = None
            self._func = fn
            self._clas = None
        else: #it's a bound method
            if o is None: self._obj = None #... actually UN-bound
            else: self._obj = weakref.ref(o)
            self._func = f
            # NOTE(review): 'c' above is unused and _clas is always
            # None; MethodType below works fine with a None class.
            self._clas = None
    def __call__(self):
        # Plain (unbound) callable: hand it back unchanged.
        if self._obj is None: return self._func
        # Bound method whose instance was collected: dead reference.
        elif self._obj() is None: return None
        try:
            import types
            instance_method = types.MethodType
        except ImportError:
            #new is deprecated in python 2.6
            import new
            instance_method = new.instancemethod
        # Re-bind the function to the still-live instance on each call.
        return instance_method(self._func, self._obj(), self._clas)
###############################################################################
def xor(first, *others):
    """XORs bytestrings.

    All arguments must have the same length as `first`; raises
    ValueError otherwise.

    Example: xor('abcd', '\x20\x01\x57\x56') = 'Ac42'
    """
    l = len(first)
    # Accumulate ordinal values, folding each other string in turn.
    first = [ord(c) for c in first]
    for oth in others:
        if len(oth) != l:
            raise ValueError("All bytestrings should have the same length: "
                             "%d != %d" % (l, len(oth)))
        first = [c ^ ord(o) for (c, o) in itertools.izip(first, oth)]
    return ''.join(chr(c) for c in first)
def long2bytes(nb, length=None):
    """Turns a single integer into a little-endian bytestring.

    Uses as many bytes as necessary or optionally pads to length bytes.
    Might return a result longer than length.

    Example: long2bytes(54321, 4) = b'\x31\xD4\x00\x00'
    """
    if nb < 0:
        raise ValueError
    elif nb == 0:
        # Zero still needs one byte of output.
        result = b'\x00'
    else:
        # Emit the least-significant byte first (little-endian).
        result = b''
        while nb > 0:
            result += chr(nb & 0xFF)
            nb = nb >> 8
    if length is not None and len(result) < length:
        result += '\x00' * (length - len(result))
    return result
################################################################################
class Chdir(object):
    """Context manager that temporarily switches the process working
    directory to `dirname`, restoring the previous one on exit (also
    when the body raises)."""

    def __init__(self, dirname):
        # Capture the cwd at construction time so it can be restored.
        self._old_dir = os.getcwd()
        self._new_dir = dirname

    def __enter__(self):
        os.chdir(self._new_dir)

    def __exit__(self, *args):
        os.chdir(self._old_dir)
################################################################################
class _TestRegularFibo(object):
def __init__(self):
self.calls = 0
def f(self, x):
self.calls += 1
if x == 0: return 0
if x == 1: return 1
return self.f(x-1) + self.f(x-2)
class _TestMemoFibo(_TestRegularFibo):
    # Same fibonacci, but memoized: the cache is keyed on the argument
    # only, so 'calls' drops from exponential to linear in x.
    f = memo_method(_TestRegularFibo.f)
class TestCommon(unittest.TestCase):
    def test_append_to_dict_of_lists(self):
        """Entries are created on first append and extended afterwards."""
        f = {}
        self.assertEquals(f.has_key(1), False)
        append_to_dict_of_lists(f, 1, 1)
        self.assertEquals(f.has_key(1), True)
        self.assertEquals(f[1], [1])
        # Duplicate values are appended, not collapsed.
        append_to_dict_of_lists(f, 1, 1)
        self.assertEquals(f.has_key(1), True)
        self.assertEquals(f[1], [1, 1])
        append_to_dict_of_lists(f, 1, 2)
        self.assertEquals(f.has_key(1), True)
        self.assertEquals(f[1], [1, 1, 2])
        append_to_dict_of_lists(f, 2, "Foo")
        self.assertEquals(f.has_key(2), True)
        self.assertEquals(f[2], ["Foo"])
def test_memo(self):
regular = _TestRegularFibo()
r1 = regular.f(20)
memoized = _TestMemoFibo()
r2 = memoized.f(20)
self.assertEqual(r1, 6765)
self.assertEqual(r2, 6765)
self.assertLess(memoized.calls, regular.calls)
self.assertEqual(regular.calls, 21891)
self.assertEqual(memoized.calls, 21)
def test_memo_2(self):
count = [0]
class C1(object):
pass
class C2(object):
pass
class TestClassMemo(object):
def __init__(self, cell):
self.cell = cell
@memo_method
def f(self, cl, x):
self.cell[0] += 1
return x
t = TestClassMemo(count)
self.assertEquals(count[0], 0)
t.f(C1, 0)
self.assertEquals(count[0], 1)
t.f(C1, 0)
self.assertEquals(count[0], 1)
t.f(C1, 1)
self.assertEquals(count[0], 2)
t.f(C2, 0)
self.assertEquals(count[0], 3)
def test_version_string_to_list(self):
self.assertEquals(version_string_to_list("0.1"), [0, 1])
self.assertEquals(version_string_to_list("1.0.2"), [1, 0, 2])
self.assertEquals(version_string_to_list("1.0.2beta"),
[1, 0, 2, 'beta'])
def test_ref(self):
class C(object):
def f(self):
return 'hello'
c = C()
cf = weakref.ref(c.f)
#bound methods behave not as expected. You want cf() not to be None
self.assertEquals(cf(), None)
#so we use the new class
cf = Ref(c.f)
#it behaves as expected
self.assertEquals(cf()(),'hello')
del c
#and after deletion the reference is dead
self.assertEquals(cf(), None)
def test_chdir(self):
def raise_exception():
with Chdir(tempfile.gettempdir()):
raise Exception
currentpath = os.getcwd()
with Chdir(tempfile.gettempdir()):
pass
self.assertEquals(os.getcwd(), currentpath)
self.assertRaises(Exception, raise_exception)
self.assertEquals(os.getcwd(), currentpath)
def test_deprecated(self):
import re
def canon_path(path):
path = os.path.realpath(path)
p, f = os.path.dirname(path), os.path.basename(path)
f = re.split(r'[$.]', f)[0]
return os.path.join(p, f)
def check_warning(msg, f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f(1, 2)
self.assertEqual(len(w), 1)
w, = w
self.assertEqual(w.message.args, (msg,))
self.assertEqual(w.category, VistrailsDeprecation)
self.assertTrue(canon_path(w.filename),
canon_path(__file__))
@deprecated('repl1')
def func1(a, b):
self.assertEqual((a, b), (1, 2))
@deprecated
def func2(a, b):
self.assertEqual((a, b), (1, 2))
check_warning('Call to deprecated function func1 replaced by repl1',
func1)
check_warning('Call to deprecated function func2', func2)
foo = None
class Foo(object):
@deprecated('repl1')
def meth1(s, a, b):
self.assertEqual((s, a, b), (foo, 1, 2))
@deprecated
def meth2(s, a, b):
self.assertEqual((s, a, b), (foo, 1, 2))
@staticmethod
@deprecated('repl3')
def meth3(a, b):
self.assertEqual((a, b), (1, 2))
@staticmethod
@deprecated
def meth4(a, b):
self.assertEqual((a, b), (1, 2))
foo = Foo()
check_warning('Call to deprecated function meth1 replaced by repl1',
foo.meth1)
check_warning('Call to deprecated function meth2',
foo.meth2)
check_warning('Call to deprecated function meth3 replaced by repl3',
foo.meth3)
check_warning('Call to deprecated function meth4',
foo.meth4)
# Allow running this module's tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
from sqlagg.columns import SimpleColumn
from sqlagg.filters import BETWEEN, IN, EQ
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import SqlData, DataFormatter, TableDataFormat, DatabaseColumn
from custom.tdh.reports import UNNECESSARY_FIELDS, CHILD_HEADERS_MAP, INFANT_HEADERS_MAP, NEWBORN_HEADERS_MAP
def merge_rows(classification_sql_data, enroll_sql_data, treatment_sql_data):
    """Join the three report datasets on their shared case_id column.

    For every classification row, prepend the matching enroll row (or the
    case_id plus blank cells when no enroll row exists) and append the
    matching treatment row (or blank cells).  The case_id column is
    stripped from the classification and treatment parts so it appears
    only once, at the front.
    """
    def case_id_index(sql_data):
        # Position of the 'case_id' column; raises IndexError if absent.
        # (Previously inlined three times with loop variable 'id',
        # shadowing the builtin.)
        return [i for i, column in enumerate(sql_data.columns)
                if column.slug == 'case_id'][0]

    classification_case_id_index = case_id_index(classification_sql_data)
    enroll_case_id_index = case_id_index(enroll_sql_data)
    treatment_case_id_index = case_id_index(treatment_sql_data)

    enroll_map = {row[enroll_case_id_index]: row for row in enroll_sql_data.rows}
    treatment_map = {row[treatment_case_id_index]: row[:treatment_case_id_index]
                     + row[treatment_case_id_index + 1:] for row in treatment_sql_data.rows}

    result = []
    for classification_row in classification_sql_data.rows:
        row = classification_row[:classification_case_id_index] + classification_row[
            classification_case_id_index + 1:]
        classification_case_id = classification_row[classification_case_id_index]
        if classification_case_id in enroll_map:
            # NOTE(review): assumes enroll rows are lists, since the
            # concatenated result is .extend()ed below - confirm upstream.
            row = enroll_map[classification_case_id] + row
        else:
            # No enroll data: keep the case_id and pad the enroll columns.
            row = [classification_case_id] + ['' for i in range(len(enroll_sql_data.headers) - 1)] + row
        if classification_case_id in treatment_map:
            row.extend(treatment_map[classification_case_id])
        else:
            row.extend(['' for i in range(len(treatment_sql_data.headers))])
        result.append(row)
    return result
class BaseSqlData(SqlData):
    """Shared plumbing for the TDH report tables.

    Subclass names start with N/I/C (Newborn/Infant/Child); header()
    uses that first letter to pick the matching header-label map.
    """
    datatables = True
    no_value = {'sort_key': 0, 'html': 0}

    def header(self, header):
        initial = self.__class__.__name__[0]
        if initial == 'N':
            mapping = NEWBORN_HEADERS_MAP
        elif initial == 'I':
            mapping = INFANT_HEADERS_MAP
        else:
            mapping = CHILD_HEADERS_MAP
        # Fall back to the raw column name when no label is mapped.
        return mapping.get(header, header)

    @property
    def filters(self):
        result = [BETWEEN("date", "startdate", "enddate"), EQ('domain', 'domain')]
        if self.config['emw']:
            result.append(IN('user_id', 'emw'))
        return result

    @property
    def group_by(self):
        return []

    @property
    def columns(self):
        # Z-score / mean columns are rendered with two decimal places.
        two_decimal = ('zscore_hfa', 'zscore_wfa', 'zscore_wfh',
                       'mean_hfa', 'mean_wfa', 'mean_wfh')
        result = []
        for name in self.group_by:
            if name in two_decimal:
                result.append(DatabaseColumn(
                    name, SimpleColumn(name),
                    format_fn=lambda x: "%.2f" % float(x if x else 0)))
            else:
                result.append(DatabaseColumn(name, SimpleColumn(name)))
        return result

    @property
    def headers(self):
        # group_by[0] (case_id) is intentionally not shown as a header.
        return [DataTablesColumn(self.header(name)) for name in self.group_by[1:]]

    @property
    def rows(self):
        fmt = DataFormatter(TableDataFormat(self.columns, no_value=self.no_value))
        return list(fmt.format(self.data, keys=self.keys, group_by=self.group_by))
class InfantConsultationHistory(BaseSqlData):
    """Infant consultation report: enroll + classification + treatment."""
    table_name = "fluff_TDHInfantClassificationFluff"
    slug = 'infant_consultation_history'
    title = 'Infant Consultation History'

    @property
    def columns(self):
        enroll = EnrollChild()
        classification = InfantClassification(config=self.config)
        treatment = InfantTreatment()
        return enroll.columns + classification.columns + treatment.columns

    @property
    def headers(self):
        enroll = EnrollChild()
        classification = InfantClassification(config=self.config)
        treatment = InfantTreatment()
        return DataTablesHeader(
            *(enroll.headers + classification.headers + treatment.headers))

    @property
    def group_by(self):
        enroll = EnrollChild()
        classification = InfantClassification(config=self.config)
        treatment = InfantTreatment()
        return enroll.group_by + classification.group_by + treatment.group_by

    @property
    def rows(self):
        return merge_rows(InfantClassification(config=self.config), EnrollChild(),
                          InfantTreatment())
class InfantConsultationHistoryComplete(BaseSqlData):
    """Full-field variant of the infant consultation report."""
    table_name = "fluff_TDHInfantClassificationFluff"
    slug = 'infant_consultation_history'
    title = 'Infant Consultation History'

    @property
    def columns(self):
        enroll = EnrollChild()
        classification = InfantClassificationExtended(config=self.config)
        treatment = InfantTreatmentExtended()
        return enroll.columns + classification.columns + treatment.columns

    @property
    def headers(self):
        enroll = EnrollChild()
        classification = InfantClassificationExtended(config=self.config)
        treatment = InfantTreatmentExtended()
        return DataTablesHeader(
            *(enroll.headers + classification.headers + treatment.headers))

    @property
    def group_by(self):
        enroll = EnrollChild()
        classification = InfantClassificationExtended(config=self.config)
        treatment = InfantTreatmentExtended()
        return enroll.group_by + classification.group_by + treatment.group_by

    @property
    def rows(self):
        return merge_rows(InfantClassificationExtended(config=self.config), EnrollChild(),
                          InfantTreatmentExtended())
class NewbornConsultationHistory(BaseSqlData):
    """Newborn consultation report: enroll + classification + treatment."""
    table_name = "fluff_TDHNewbornClassificationFluff"
    slug = 'newborn_consultation_history'
    title = 'Newborn Consultation History'

    @property
    def columns(self):
        enroll = EnrollChild()
        classification = NewbornClassification(config=self.config)
        treatment = NewbornTreatment()
        return enroll.columns + classification.columns + treatment.columns

    @property
    def headers(self):
        enroll = EnrollChild()
        classification = NewbornClassification(config=self.config)
        treatment = NewbornTreatment()
        return DataTablesHeader(
            *(enroll.headers + classification.headers + treatment.headers))

    @property
    def group_by(self):
        enroll = EnrollChild()
        classification = NewbornClassification(config=self.config)
        treatment = NewbornTreatment()
        return enroll.group_by + classification.group_by + treatment.group_by

    @property
    def rows(self):
        return merge_rows(NewbornClassification(config=self.config), EnrollChild(),
                          NewbornTreatment())
class NewbornConsultationHistoryComplete(BaseSqlData):
    """Full-field variant of the newborn consultation report."""
    table_name = "fluff_TDHNewbornClassificationFluff"
    slug = 'newborn_consultation_history'
    title = 'Newborn Consultation History'

    @property
    def columns(self):
        enroll = EnrollChild()
        classification = NewbornClassificationExtended(config=self.config)
        treatment = NewbornTreatmentExtended()
        return enroll.columns + classification.columns + treatment.columns

    @property
    def headers(self):
        enroll = EnrollChild()
        classification = NewbornClassificationExtended(config=self.config)
        treatment = NewbornTreatmentExtended()
        return DataTablesHeader(
            *(enroll.headers + classification.headers + treatment.headers))

    @property
    def group_by(self):
        enroll = EnrollChild()
        classification = NewbornClassificationExtended(config=self.config)
        treatment = NewbornTreatmentExtended()
        return enroll.group_by + classification.group_by + treatment.group_by

    @property
    def rows(self):
        return merge_rows(NewbornClassificationExtended(config=self.config), EnrollChild(),
                          NewbornTreatmentExtended())
class ChildConsultationHistory(BaseSqlData):
    """Child consultation report: enroll + classification + treatment.

    The slug/title previously read 'newborn_consultation_history' /
    'Newborn Consultation History' - a copy-paste slip from
    NewbornConsultationHistory that also made the slug collide with it.
    """
    table_name = "fluff_TDHChildClassificationFluff"
    slug = 'child_consultation_history'
    title = 'Child Consultation History'

    @property
    def columns(self):
        return EnrollChild().columns + ChildClassification(config=self.config).columns + ChildTreatment().columns

    @property
    def headers(self):
        return DataTablesHeader(
            *EnrollChild().headers + ChildClassification(config=self.config).headers + ChildTreatment().headers)

    @property
    def group_by(self):
        return EnrollChild().group_by + ChildClassification(
            config=self.config).group_by + ChildTreatment().group_by

    @property
    def rows(self):
        return merge_rows(ChildClassification(config=self.config), EnrollChild(), ChildTreatment())
class ChildConsultationHistoryComplete(BaseSqlData):
    """Full-field variant of the child consultation report.

    The slug/title previously read 'newborn_consultation_history' /
    'Newborn Consultation History' - the same copy-paste slip as in
    ChildConsultationHistory.
    """
    table_name = "fluff_TDHChildClassificationFluff"
    slug = 'child_consultation_history'
    title = 'Child Consultation History'

    @property
    def columns(self):
        return EnrollChild().columns + ChildClassificationExtended(
            config=self.config).columns + ChildTreatmentExtended().columns

    @property
    def headers(self):
        return DataTablesHeader(
            *EnrollChild().headers + ChildClassificationExtended(
                config=self.config).headers + ChildTreatmentExtended().headers)

    @property
    def group_by(self):
        return EnrollChild().group_by + ChildClassificationExtended(
            config=self.config).group_by + ChildTreatmentExtended().group_by

    @property
    def rows(self):
        return merge_rows(ChildClassificationExtended(config=self.config), EnrollChild(), ChildTreatmentExtended())
class InfantClassification(BaseSqlData):
    """Curated column list for the infant classification report section.

    group_by doubles as the column list (see BaseSqlData.columns);
    'case_id' must stay first because BaseSqlData.headers skips
    group_by[0] and merge_rows() keys the join on this column.
    """
    table_name = "fluff_TDHInfantClassificationFluff"
    slug = 'infant_classification'
    title = 'Infant Classification'
    @property
    def group_by(self):
        # Fixed schema: the infant-visit classification fields, in
        # display order.
        return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type',
                'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa',
                'zscore_wfh', 'mean_wfh', 'classification_deshydratation', 'classification_diahree',
                'classification_infection', 'classification_malnutrition', 'classification_vih', 'inf_bac_qa',
                'inf_bac_freq_resp', 'inf_bac_qc', 'inf_bac_qd', 'inf_bac_qe', 'inf_bac_qf', 'inf_bac_qg',
                'inf_bac_qh', 'inf_bac_qj', 'inf_bac_qk', 'inf_bac_ql', 'inf_bac_qm', 'diarrhee_qa',
                'alimentation_qa', 'alimentation_qb', 'alimentation_qc', 'alimentation_qd', 'alimentation_qf',
                'alimentation_qg', 'alimentation_qh', 'vih_qa', 'vih_qb', 'vih_qc', 'vih_qd', 'vih_qe', 'vih_qf',
                'vih_qg', 'vih_qh', 'vih_qi', 'vih_qj', 'vih_qk', 'vih_ql', 'other_comments']
class InfantClassificationExtended(BaseSqlData):
    """Like InfantClassification, but exposes every model field except
    the bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHInfantClassificationFluff"
    slug = 'infant_classification'
    title = 'Infant Classification'

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHInfantClassificationFluff
        return TDHInfantClassificationFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
class NewbornClassification(BaseSqlData):
    """Curated column list for the newborn classification report section.

    group_by doubles as the column list (see BaseSqlData.columns);
    'case_id' must stay first because BaseSqlData.headers skips
    group_by[0] and merge_rows() keys the join on this column.
    """
    table_name = "fluff_TDHNewbornClassificationFluff"
    slug = 'newborn_classification'
    title = 'Newborn Classification'
    @property
    def group_by(self):
        # Fixed schema: the newborn-visit classification fields, in
        # display order.
        return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type',
                'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa',
                'zscore_wfh', 'mean_wfh', 'classification_infection', 'classification_malnutrition',
                'classification_occular', 'classification_poids', 'classification_vih', 'inf_bac_qa', 'inf_bac_qb',
                'inf_bac_freq_resp', 'inf_bac_qd', 'inf_bac_qe', 'inf_bac_qf', 'inf_bac_qg', 'inf_bac_qh',
                'inf_bac_qi', 'inf_bac_qj', 'poids_qa', 'inf_occ_qa', 'vih_qa', 'vih_qb', 'vih_qc', 'vih_qd',
                'vih_qe', 'vih_qf', 'vih_qg', 'alimentation_qa', 'alimentation_qb', 'alimentation_qd',
                'alimentation_qf', 'alimentation_qg', 'other_comments']
class NewbornClassificationExtended(BaseSqlData):
    """Like NewbornClassification, but exposes every model field except
    the bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHNewbornClassificationFluff"
    slug = 'newborn_classification'
    title = 'Newborn Classification'

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHNewbornClassificationFluff
        return TDHNewbornClassificationFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
class ChildClassification(BaseSqlData):
    """Curated column list for the child classification report section.

    group_by doubles as the column list (see BaseSqlData.columns);
    'case_id' must stay first because BaseSqlData.headers skips
    group_by[0] and merge_rows() keys the join on this column.
    """
    table_name = "fluff_TDHChildClassificationFluff"
    # NOTE(review): slug/title say 'consultation history' although this
    # class is the classification section - possibly a copy-paste slip;
    # confirm how the slug is consumed before renaming.
    slug = 'child_consultation_history'
    title = 'Child Consultation History'
    @property
    def group_by(self):
        # Fixed schema: the child-visit classification fields, in
        # display order.
        return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type',
                'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa',
                'zscore_wfh', 'mean_wfh', 'measles_1', 'measles_2', 'opv_0', 'opv_1', 'opv_2', 'opv_3', 'penta_1',
                'penta_2', 'penta_3', 'pneumo_1', 'pneumo_2', 'pneumo_3', 'rotavirus_1', 'rotavirus_2',
                'rotavirus_3', 'yf', 'classification_anemie', 'classification_deshydratation',
                'classification_diahree', 'classification_dysenterie', 'classification_malnutrition',
                'classification_oreille', 'classification_paludisme', 'classification_pneumonie',
                'classification_rougeole', 'classification_vih', 'classifications_graves', 'boire', 'vomit',
                'convulsions_passe', 'lethargie', 'convulsions_present', 'toux_presence', 'toux_presence_duree',
                'freq_resp', 'tirage', 'stridor', 'diarrhee', 'diarrhee_presence', 'diarrhee_presence_duree',
                'sang_selles', 'conscience_agitation', 'yeux_enfonces', 'soif', 'pli_cutane', 'fievre_presence',
                'fievre_presence_duree', 'fievre_presence_longue', 'tdr', 'urines_foncees', 'saignements_anormaux',
                'raideur_nuque', 'ictere', 'choc', 'eruption_cutanee', 'ecoulement_nasal', 'yeux_rouge',
                'ecoulement_oculaire', 'ulcerations', 'cornee', 'oreille', 'oreille_probleme', 'oreille_douleur',
                'oreille_ecoulement', 'oreille_ecoulement_duree', 'oreille_gonflement', 'paleur_palmaire',
                'oedemes', 'test_appetit', 'serologie_enfant', 'test_enfant', 'pneumonie_recidivante',
                'diarrhee_dernierement', 'candidose_buccale', 'hypertrophie_ganglions_lymphatiques',
                'augmentation_glande_parotide', 'test_mere', 'serologie_mere', 'other_comments']
class ChildClassificationExtended(BaseSqlData):
    """Like ChildClassification, but exposes every model field except
    the bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHChildClassificationFluff"
    slug = 'child_classification'
    title = 'Child Classification'

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHChildClassificationFluff
        return TDHChildClassificationFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
class EnrollChild(BaseSqlData):
    """Demographics section shared by all of the consultation reports."""
    table_name = "fluff_TDHEnrollChildFluff"
    slug = 'enroll_child'
    title = 'Enroll Child'

    @property
    def filters(self):
        # Enrollment data is never date- or user-filtered.
        return []

    @property
    def group_by(self):
        return ['case_id', 'dob', 'sex', 'village']

    @property
    def headers(self):
        # Unlike BaseSqlData.headers, case_id is kept as a column here.
        return [DataTablesColumn(self.header(name)) for name in self.group_by]
class EnrollChildExtended(BaseSqlData):
    """Like EnrollChild, but exposes every model field except the
    bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHEnrollChildFluff"
    slug = 'enroll_child'
    title = 'Enroll Child'

    @property
    def filters(self):
        # Enrollment data is never date- or user-filtered.
        return []

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHEnrollChildFluff
        return TDHEnrollChildFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
class InfantTreatment(BaseSqlData):
    """Curated column list for the infant treatment report section.

    group_by doubles as the column list (see BaseSqlData.columns);
    'case_id' must stay first because BaseSqlData.headers skips
    group_by[0] and merge_rows() keys the join on this column.
    """
    table_name = "fluff_TDHInfantTreatmentFluff"
    slug = 'infant_treatment'
    title = 'Infant Treatment'
    @property
    def filters(self):
        # Treatment data is joined on case_id, never filtered directly.
        return []
    @property
    def group_by(self):
        return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_treat_2',
                'infection_grave_no_ref_treat_0', 'infection_grave_no_ref_treat_1',
                'infection_grave_no_ref_treat_2', 'infection_grave_no_ref_treat_5', 'infection_locale_treat_0',
                'infection_locale_treat_1', 'maladie_grave_treat_0', 'maladie_grave_treat_1']
class InfantTreatmentExtended(BaseSqlData):
    """Like InfantTreatment, but exposes every model field except the
    bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHInfantTreatmentFluff"
    slug = 'infant_treatment'
    title = 'Infant Treatment'

    @property
    def filters(self):
        # Treatment data is joined on case_id, never filtered directly.
        return []

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHInfantTreatmentFluff
        return TDHInfantTreatmentFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
class NewbornTreatment(BaseSqlData):
    """Curated column list for the newborn treatment report section.

    group_by doubles as the column list (see BaseSqlData.columns);
    'case_id' must stay first because BaseSqlData.headers skips
    group_by[0] and merge_rows() keys the join on this column.
    """
    table_name = "fluff_TDHNewbornTreatmentFluff"
    slug = 'newborn_treatment'
    title = 'Newborn Treatment'
    @property
    def filters(self):
        # Treatment data is joined on case_id, never filtered directly.
        return []
    @property
    def group_by(self):
        return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_no_ref_treat_0',
                'infection_grave_no_ref_treat_1', 'infection_locale_treat_0', 'infection_locale_treat_1',
                'incapable_nourrir_treat_0', 'incapable_nourrir_treat_1']
class NewbornTreatmentExtended(BaseSqlData):
    """Like NewbornTreatment, but exposes every model field except the
    bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHNewbornTreatmentFluff"
    slug = 'newborn_treatment'
    title = 'Newborn Treatment'

    @property
    def filters(self):
        # Treatment data is joined on case_id, never filtered directly.
        return []

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHNewbornTreatmentFluff
        return TDHNewbornTreatmentFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
class ChildTreatment(BaseSqlData):
    """Curated column list for the child treatment report section.

    group_by doubles as the column list (see BaseSqlData.columns);
    'case_id' must stay first because BaseSqlData.headers skips
    group_by[0] and merge_rows() keys the join on this column.
    """
    table_name = "fluff_TDHChildTreatmentFluff"
    slug = 'child_treatment'
    title = 'Child Treatment'
    @property
    def filters(self):
        # Treatment data is joined on case_id, never filtered directly.
        return []
    @property
    def group_by(self):
        # Fixed schema: per-condition treatment step fields, in display
        # order.
        return ['case_id', 'pneumonie_grave_treat_0',
                'pneumonie_grave_treat_1', 'pneumonie_grave_treat_4', 'pneumonie_grave_no_ref_treat_0',
                'pneumonie_grave_no_ref_treat_1', 'pneumonie_grave_no_ref_treat_3',
                'pneumonie_grave_no_ref_treat_5', 'pneumonie_grave_no_ref_treat_6', 'pneumonie_treat_0',
                'pneumonie_treat_1', 'deshydratation_severe_pas_grave_perfusion_treat_3',
                'deshydratation_severe_pas_grave_perfusion_treat_4',
                'deshydratation_severe_pas_grave_perfusion_treat_5',
                'deshydratation_severe_pas_grave_perfusion_treat_6',
                'deshydratation_severe_pas_grave_perfusion_treat_8',
                'deshydratation_severe_pas_grave_perfusion_treat_9',
                'deshydratation_severe_pas_grave_perfusion_treat_10',
                'deshydratation_severe_pas_grave_perfusion_treat_11',
                'deshydratation_severe_pas_grave_perfusion_treat_15',
                'deshydratation_severe_pas_grave_perfusion_treat_16',
                'deshydratation_severe_pas_grave_sng_treat_2', 'deshydratation_severe_pas_grave_sng_treat_3',
                'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_3',
                'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_4', 'signes_deshydratation_treat_0',
                'signes_deshydratation_treat_3', 'pas_deshydratation_treat_1', 'dysenterie_treat_1',
                'dysenterie_treat_2', 'dysenterie_treat_3', 'diahree_persistante_treat_0',
                'diahree_persistante_treat_1', 'paludisme_grave_treat_0', 'paludisme_grave_treat_1',
                'paludisme_grave_treat_2', 'paludisme_grave_treat_4', 'paludisme_grave_treat_5',
                'paludisme_grave_treat_7', 'paludisme_grave_no_ref_treat_0', 'paludisme_grave_no_ref_treat_1',
                'paludisme_grave_no_ref_treat_2', 'paludisme_grave_no_ref_treat_3',
                'paludisme_grave_no_ref_treat_5', 'paludisme_grave_no_ref_treat_6', 'paludisme_simple_treat_1',
                'paludisme_simple_treat_2', 'paludisme_simple_treat_3', 'paludisme_simple_treat_4',
                'paludisme_simple_treat_6', 'rougeole_compliquee_treat_0', 'rougeole_compliquee_treat_1',
                'rougeole_compliquee_treat_2', 'rougeole_compliquee_treat_3', 'rougeole_complications_treat_0',
                'rougeole_complications_treat_1', 'rougeole_treat_0', 'rougeole_treat_1', 'rougeole_treat_2',
                'rougeole_treat_3', 'antecedent_rougeole_treat_0', 'antecedent_rougeole_treat_1',
                'mastoidite_treat_0', 'mastoidite_treat_1', 'mastoidite_treat_2',
                'infection_aigue_oreille_treat_0', 'infection_aigue_oreille_treat_1', 'anemie_grave_treat_0',
                'anemie_treat_0', 'anemie_treat_1', 'anemie_treat_2', 'anemie_treat_3', 'anemie_treat_4',
                'anemie_treat_5', 'anemie_treat_6', 'mass_treat_2', 'mass_treat_3', 'mass_treat_4', 'mass_treat_5',
                'mass_treat_7', 'mass_treat_8', 'mam_treat_2', 'mam_treat_3', 'mam_treat_5', 'mam_treat_6',
                'mam_treat_7', 'pas_malnutrition_treat_2', 'pas_malnutrition_treat_3',
                'vih_symp_confirmee_treat_1', 'vih_symp_confirmee_treat_2', 'vih_symp_confirmee_treat_4',
                'vih_confirmee_treat_1', 'vih_confirmee_treat_2', 'vih_confirmee_treat_4',
                'vih_symp_probable_treat_1', 'vih_symp_probable_treat_2', 'vih_symp_probable_treat_3',
                'vih_possible_treat_1', 'vih_possible_treat_2', 'vih_possible_treat_3',
                'paludisme_grave_tdr_negatif_treat_0', 'paludisme_grave_tdr_negatif_treat_1',
                'paludisme_grave_tdr_negatif_treat_3', 'paludisme_grave_tdr_negatif_treat_4',
                'paludisme_grave_tdr_negatif_treat_6', 'vitamine_a']
class ChildTreatmentExtended(BaseSqlData):
    """Like ChildTreatment, but exposes every model field except the
    bookkeeping ones listed in UNNECESSARY_FIELDS."""
    table_name = "fluff_TDHChildTreatmentFluff"
    slug = 'child_treatment'
    title = 'Child Treatment'

    @property
    def filters(self):
        # Treatment data is joined on case_id, never filtered directly.
        return []

    @staticmethod
    def _model_fields():
        # Field names taken straight off a fresh fluff model instance.
        from custom.tdh.models import TDHChildTreatmentFluff
        return TDHChildTreatmentFluff().__dict__['_obj'].keys()

    @property
    def columns(self):
        return [DatabaseColumn(name, SimpleColumn(name))
                for name in self._model_fields() if name not in UNNECESSARY_FIELDS]

    @property
    def headers(self):
        skipped = UNNECESSARY_FIELDS + ['case_id']
        return [DataTablesColumn(self.header(name))
                for name in self._model_fields() if name not in skipped]

    @property
    def group_by(self):
        return [name for name in self._model_fields() if name not in UNNECESSARY_FIELDS]
| |
"""Unit test module for auth"""
from collections import namedtuple
import datetime
from flask import url_for
from flask_webtest import SessionScope
import pytest
from werkzeug.exceptions import Unauthorized
from portal.extensions import db
from portal.models.auth import AuthProvider, Token, create_service_token
from portal.models.client import Client, validate_origin
from portal.models.intervention import INTERVENTION
from portal.models.role import ROLE
from portal.models.user import (
RoleError,
User,
UserRelationship,
add_role,
add_user,
)
from tests import OAUTH_INFO_PROVIDER_LOGIN, TEST_USER_ID
@pytest.fixture
def test_auth_user(add_user):
    """Fixture: a local user with a known email/password pair."""
    return add_user(
        username='username',
        email='localuser@test.com',
        password='Password1',
    )
def test_nouser_logout(client, initialized_db):
    """Confirm logout works without a valid user."""
    resp = client.get('/logout')
    assert resp.status_code == 302
def test_local_user_add(client):
    """Add a local user via the flask_user registration form."""
    form = {
        'password': 'one2Three',
        'retype_password': 'one2Three',
        'email': 'otu@example.com'}
    resp = client.post('/user/register', data=form)
    assert resp.status_code == 302
    created = User.query.filter_by(username=form['email']).first()
    assert created.active
def test_local_login_valid_username_and_password(test_auth_user, local_login):
    """Login through the login form with valid credentials succeeds.

    (The docstring previously sat after the first statement, where it
    was a dead string expression rather than a docstring.)
    """
    test_auth_user = db.session.merge(test_auth_user)
    # Attempt to login with valid creds
    response = local_login(test_auth_user.email, 'Password1')
    # Validate login was successful
    assert response.status_code == 200
    assert test_auth_user.password_verification_failures == 0
def test_local_login_failure_increments_lockout(test_auth_user, local_login):
    """A failed form login increments the user's failure count.

    (The docstring previously sat after the first statement, where it
    was a dead string expression rather than a docstring.)
    """
    test_auth_user = db.session.merge(test_auth_user)
    # Attempt to login with an invalid password
    local_login(test_auth_user.email, 'invalidpassword')
    # Verify there was a password failure
    db.session.refresh(test_auth_user)
    assert test_auth_user.password_verification_failures == 1
def test_local_login_valid_username_and_password_resets_lockout(
        test_auth_user, local_login):
    """A successful form login resets the lockout failure count.

    (The docstring previously sat after the first statement, where it
    was a dead string expression rather than a docstring.)
    """
    test_auth_user = db.session.merge(test_auth_user)
    # Mock a failed password attempt
    test_auth_user.add_password_verification_failure()
    assert test_auth_user.password_verification_failures == 1
    # Attempt to login with valid creds
    local_login(test_auth_user.email, 'Password1')
    # Verify lockout was reset
    db.session.refresh(test_auth_user)
    assert test_auth_user.password_verification_failures == 0
def test_local_login_lockout_after_unsuccessful_attempts(
        test_auth_user, local_login):
    """Exhausting the permitted login attempts locks the account out.

    (The docstring previously sat after the first statement, where it
    was a dead string expression rather than a docstring.)
    """
    test_auth_user = db.session.merge(test_auth_user)
    # Use up all but the last of the permitted login attempts
    attempts = test_auth_user.failed_login_attempts_before_lockout - 1
    for failure_index in range(attempts):
        response = local_login(test_auth_user.email, 'invalidpassword')
        # Was 'status_code is 200': identity comparison with an int
        # literal, which only worked via CPython's small-int caching.
        assert response.status_code == 200
        db.session.refresh(test_auth_user)
        assert test_auth_user.password_verification_failures == (
            failure_index + 1)
        assert not test_auth_user.is_locked_out
    # Validate that after using up all permitted attempts
    # the next is locked out
    local_login(test_auth_user.email, 'invalidpassword')
    db.session.refresh(test_auth_user)
    assert test_auth_user.is_locked_out
def test_local_login_verify_lockout_resets_after_lockout_period(
        test_auth_user):
    """The lockout expires once the lockout period has elapsed.

    (The docstring previously sat after the first statement, where it
    was a dead string expression rather than a docstring.)
    """
    test_auth_user = db.session.merge(test_auth_user)
    # Lock the user out
    attempts = test_auth_user.failed_login_attempts_before_lockout
    for _ in range(attempts):
        test_auth_user.add_password_verification_failure()
    # Verify the user is locked out
    assert test_auth_user.is_locked_out
    # Move time to the end of the lockout period
    test_auth_user.last_password_verification_failure = \
        datetime.datetime.utcnow() - test_auth_user.lockout_period_timedelta
    # Verify we are no longer locked out
    assert not test_auth_user.is_locked_out
def test_local_login_verify_cant_login_when_locked_out(
        test_auth_user, local_login):
    """Valid credentials do not clear an active lockout.

    (The docstring previously sat after the first statement, where it
    was a dead string expression rather than a docstring.)
    """
    test_auth_user = db.session.merge(test_auth_user)
    # Lock the user out
    attempts = test_auth_user.failed_login_attempts_before_lockout
    for _ in range(attempts):
        test_auth_user.add_password_verification_failure()
    assert test_auth_user.is_locked_out
    # Attempt to login with valid creds
    local_login(test_auth_user.email, 'Password1')
    # Verify the user is still locked out
    assert test_auth_user.is_locked_out
def test_register_now(
        app, promote_user, login, assert_redirects, client, test_user):
    """Initiate the register-now flow for an existing account."""
    app.config['NO_CHALLENGE_WO_DATA'] = False
    # merge to avoid a detached instance error
    test_user = db.session.merge(test_user)
    test_user.password = None
    test_user.birthdate = '1998-01-31'
    promote_user(role_name=ROLE.ACCESS_ON_VERIFY.value)
    merged = db.session.merge(test_user)
    email = merged.email
    login()
    resp = client.get('/api/user/register-now')
    assert_redirects(resp, url_for('user.register', email=email))
def test_client_add(promote_user, login, client):
    """Adding a client application persists its origins."""
    origins = "https://test.com https://two.com"
    promote_user(role_name=ROLE.APPLICATION_DEVELOPER.value)
    login()
    resp = client.post('/client', data=dict(application_origins=origins))
    assert resp.status_code == 302
    created = Client.query.filter_by(user_id=TEST_USER_ID).first()
    assert created.application_origins == origins
def test_client_bad_add(
        promote_user, login, client, initialized_db):
    """A malformed application origin is rejected with a form error."""
    promote_user(role_name=ROLE.APPLICATION_DEVELOPER.value)
    login()
    resp = client.post(
        '/client',
        data=dict(application_origins="bad data in"))
    assert "Invalid URL" in resp.get_data(as_text=True)
def test_client_edit(client, test_user_login, test_client):
    """Editing a client accepts matching callback URLs, rejects others."""
    test_url = 'http://tryme.com'
    origins = "{} {}".format(test_client.application_origins, test_url)

    def post_edit(callback):
        # Post the edit form with the given callback URL.
        return client.post(
            '/client/{0}'.format(test_client.client_id),
            data=dict(
                callback_url=callback, application_origins=origins,
                application_role=INTERVENTION.DEFAULT.name))

    # A callback URL within the application origins is accepted.
    resp = post_edit(test_url)
    assert resp.status_code == 302
    test_client = Client.query.get('test_client')
    assert test_client.callback_url == test_url

    invalid_url = "http://invalid.org"
    resp2 = post_edit(invalid_url)
    # 200 response, because page is reloaded with validation errors
    assert resp2.status_code == 200
    error_text = 'URL host must match a provided Application Origin URL'
    assert error_text in resp2.get_data(as_text=True)
    test_client = Client.query.get('test_client')
    assert test_client.callback_url != invalid_url
def test_callback_validation(client, test_user_login, test_client):
    """Confirm only valid callback URLs can be set."""
    resp = client.post(
        '/client/{0}'.format(test_client.client_id),
        data=dict(
            callback_url='badprotocol.com',
            application_origins=test_client.application_origins))
    assert resp.status_code == 200
    refreshed = Client.query.get('test_client')
    assert refreshed.callback_url is None
def test_service_account_creation(test_client):
    """Confirm we can create a service account and token"""
    test_user = User.query.get(TEST_USER_ID)
    service_user = test_user.add_service_account()

    with SessionScope(db):
        db.session.add(service_user)
        db.session.add(test_client)
        db.session.commit()
    # Re-attach both instances to the current session after the scoped
    # session committed and expired them.
    service_user = db.session.merge(service_user)
    test_client = db.session.merge(test_client)

    # Did we get a service account with the correct roles and relationships
    assert len(service_user.roles) == 1
    assert 'service' == service_user.roles[0].name
    sponsorship = UserRelationship.query.filter_by(
        other_user_id=service_user.id).first()
    assert sponsorship.user_id == TEST_USER_ID
    assert sponsorship.relationship.name == 'sponsor'

    # Can we get a usable Bearer Token
    create_service_token(client=test_client, user=service_user)
    token = Token.query.filter_by(user_id=service_user.id).first()
    assert token

    # The token should have a very long life
    assert (token.expires > datetime.datetime.utcnow()
            + datetime.timedelta(days=364))
def test_service_account_promotion(test_client):
    """Confirm we can not promote a service account """
    test_user = User.query.get(TEST_USER_ID)
    service_user = test_user.add_service_account()

    with SessionScope(db):
        db.session.add(service_user)
        db.session.commit()
    service_user = db.session.merge(service_user)

    # Adding any further role to a service account must fail. Use the
    # context-manager form of pytest.raises: `assert pytest.raises(...)`
    # merely tests the truthiness of the ExceptionInfo object.
    with pytest.raises(RoleError):
        add_role(service_user, ROLE.APPLICATION_DEVELOPER.value)
    assert len(service_user.roles) == 1
def test_token_status(client, test_user):
    """Token-status endpoint reports the remaining lifetime in seconds"""
    with SessionScope(db):
        test_client = Client(
            client_id='test-id', client_secret='test-secret',
            user_id=TEST_USER_ID)
        token = Token(
            access_token='test-token',
            client=test_client,
            user_id=TEST_USER_ID,
            token_type='bearer',
            expires=(datetime.datetime.utcnow() +
                     datetime.timedelta(seconds=30)))
        db.session.add(test_client)
        db.session.add(token)
        db.session.commit()
    token = db.session.merge(token)

    response = client.get(
        "/oauth/token-status",
        headers={'Authorization': 'Bearer {}'.format(token.access_token)})
    assert 200 == response.status_code
    data = response.json
    # BUG FIX: pytest.approx(30, 5) passes 5 as the *relative* tolerance
    # (i.e. 500%, accepting anything within +/-150). The intent is an
    # absolute +/-5 second window around the 30s expiry.
    assert data['expires_in'] == pytest.approx(30, abs=5)
def test_token_status_wo_header(client):
    """Call for token_status w/o token should return 401"""
    status = client.get("/oauth/token-status").status_code
    assert status == 401
def test_origin_validation(app, test_client):
    """Origins from the client or local server validate; others raise"""
    client_url = test_client._redirect_uris
    local_url = "http://{}/home?test".format(
        app.config.get('SERVER_NAME'))
    invalid_url = 'http://invalid.org'

    assert validate_origin(client_url)
    assert validate_origin(local_url)
    # Context-manager form rather than asserting the truthy ExceptionInfo
    # returned by the callable form of pytest.raises.
    with pytest.raises(Unauthorized):
        validate_origin(invalid_url)
def test_origin_validation_origin_not_in_whitelist(app):
    """An origin absent from CORS_WHITELIST must be rejected"""
    valid_origin = 'www.domain.com'
    app.config['CORS_WHITELIST'] = [valid_origin]

    invalid_origin = 'www.invaliddomain.com'
    url = 'http://{}/'.format(invalid_origin)
    # Context-manager form rather than asserting the truthy ExceptionInfo
    # returned by the callable form of pytest.raises.
    with pytest.raises(Unauthorized):
        validate_origin(url)
def test_origin_validation_origin_in_whitelist(app):
    """An origin listed in CORS_WHITELIST validates"""
    whitelisted = 'www.domain.com'
    app.config['CORS_WHITELIST'] = [whitelisted]
    assert validate_origin('http://{}/'.format(whitelisted))
def test_oauth_with_new_auth_provider_and_new_user(login):
    """A first-time OAuth login creates both user and provider rows"""
    # Login using the test backdoor (return value intentionally unused).
    login(oauth_info=OAUTH_INFO_PROVIDER_LOGIN)

    # Verify a new user was created
    user = User.query.filter_by(
        email=OAUTH_INFO_PROVIDER_LOGIN['email']
    ).first()
    assert user

    # Verify a new auth provider was created
    assert AuthProvider.query.filter_by(
        provider=OAUTH_INFO_PROVIDER_LOGIN['provider_name'],
        provider_id=OAUTH_INFO_PROVIDER_LOGIN['provider_id'],
        user_id=user.id,
    ).first()
def test_oauth_with_new_auth_provider_and_new_user_unicode_name(login):
    """OAuth login with a unicode last name creates the user"""
    # Set a unicode name
    oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
    oauth_info['last_name'] = 'Bugn\xed'

    # BUG FIX: login with the *modified* info. The original passed the
    # unmodified OAUTH_INFO_PROVIDER_LOGIN constant, so the unicode name
    # never reached the code under test.
    login(oauth_info=oauth_info)

    # Verify a new user was created with the unicode last name
    user = User.query.filter_by(
        last_name=oauth_info['last_name']
    ).first()
    assert user

    # Verify a new auth provider was created
    assert AuthProvider.query.filter_by(
        provider=oauth_info['provider_name'],
        provider_id=oauth_info['provider_id'],
        user_id=user.id,
    ).first()
def test_oauth_with_new_auth_provider_and_existing_user(login):
    """Logging in as a known user via a new provider links the two"""
    # Create the user up front, then login through the test backdoor.
    existing = add_user_from_oauth_info(OAUTH_INFO_PROVIDER_LOGIN)
    response = login(oauth_info=OAUTH_INFO_PROVIDER_LOGIN)
    assert response.status_code == 200

    # A fresh auth provider row should now reference the existing user.
    provider_row = AuthProvider.query.filter_by(
        provider=OAUTH_INFO_PROVIDER_LOGIN['provider_name'],
        provider_id=OAUTH_INFO_PROVIDER_LOGIN['provider_id'],
        user_id=existing.id,
    ).first()
    assert provider_row
def test_oauth_with_existing_auth_provider_and_existing_user(login):
    """Login succeeds when user and provider both already exist"""
    # Create both the user and its auth provider before logging in.
    user = add_user_from_oauth_info(OAUTH_INFO_PROVIDER_LOGIN)
    add_auth_provider(OAUTH_INFO_PROVIDER_LOGIN, user)

    # Login through the test backdoor should simply succeed.
    assert login(oauth_info=OAUTH_INFO_PROVIDER_LOGIN).status_code == 200
def test_oauth_when_mock_provider_fails_to_get_user_json(login):
    """A provider failure fetching user json surfaces as a 500"""
    # Configure the mock provider to fail fetching the user json.
    oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN,
                      fail_to_get_user_json=True)

    # The login attempt should surface a server error.
    assert login(oauth_info=oauth_info).status_code == 500
def test_oauth_when_non_required_value_undefined(login):
    """Login succeeds even when an optional field is missing"""
    # Drop an optional field (birthdate) from the provider info.
    oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
    del oauth_info['birthdate']

    # Login should still succeed without it.
    assert login(oauth_info=oauth_info).status_code == 200
def test_oauth_when_required_value_undefined(login):
    """Login fails with a 500 when a mandatory field is missing"""
    # Drop a mandatory field (provider_id) from the provider info.
    oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
    del oauth_info['provider_id']

    # Login cannot proceed without it; expect a server error.
    assert login(oauth_info=oauth_info).status_code == 500
def test_oauth_with_invalid_token(login, assert_redirects):
    """A missing OAuth token forces a redirect back to 'next'"""
    # Remove the token to simulate an invalid OAuth exchange.
    oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
    oauth_info.pop('token', None)

    # Without a token the login should force a reload of 'next'.
    response = login(oauth_info=oauth_info, follow_redirects=False)
    assert_redirects(response, oauth_info['next'])
def add_user_from_oauth_info(oauth_info):
    """Create, persist and return a user built from an oauth-info dict."""
    # add_user() expects an object with attribute access, so wrap the
    # dict in an ad-hoc namedtuple.
    MockUser = namedtuple('Mock', oauth_info.keys())
    user = add_user(MockUser(*oauth_info.values()))
    db.session.commit()
    return user
def add_auth_provider(oauth_info, user):
    """Persist and return an AuthProvider row linking *user* to the provider."""
    provider_row = AuthProvider(
        provider=oauth_info['provider_name'],
        provider_id=oauth_info['provider_id'],
        user=user,
    )
    db.session.add(provider_row)
    db.session.commit()
    return provider_row
| |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import List, Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from shared.operand import ImmOperandType, RegOperandType, OperandType
from .jump import Jump
from .straight_line_insn import StraightLineInsn
from ..config import Config
from ..program import ProgInsn, Program
from ..model import LoopStack, Model
from ..snippet import LoopSnippet, ProgSnippet, Snippet
from ..snippet_gen import GenCont, GenRet, SnippetGen
class Loop(SnippetGen):
    '''A generator that generates a LOOP / LOOPI

    Picks a loop shape (body size plus an iteration count), generates the
    head instruction and a loop body, and wraps the result up as a
    LoopSnippet.
    '''
    # An iteration count: the encoded value, the implied number of iterations
    # and a possible loop warp.
    IterCount = Tuple[int, int, Optional[LoopSnippet.Warp]]

    # The shape of a loop that's being generated. Consists of bodysize and an
    # iteration count, as described above.
    Shape = Tuple[int, IterCount]

    # The individual pieces of a generated loop. The tuple is (bodysize,
    # hd_insn, body_snippet, model_afterwards, warp)
    Pieces = Tuple[int, ProgInsn, Snippet, Model, Optional[LoopSnippet.Warp]]
def __init__(self, cfg: Config, insns_file: InsnsFile) -> None:
    '''Look up LOOP/LOOPI and read weights and limits from cfg.

    Raises RuntimeError if either instruction in insns_file doesn't have
    the operand shape this generator relies on, or if the loop-iters /
    loop-tail-insns maxima configured in cfg are less than 1.
    '''
    super().__init__()

    self.jump_gen = Jump(cfg, insns_file)
    self.sli_gen = StraightLineInsn(cfg, insns_file)

    self.loop = self._get_named_insn(insns_file, 'loop')
    self.loopi = self._get_named_insn(insns_file, 'loopi')

    # loop expects operands: grs, bodysize
    if not (len(self.loop.operands) == 2 and
            isinstance(self.loop.operands[0].op_type, RegOperandType) and
            self.loop.operands[0].op_type.reg_type == 'gpr' and
            isinstance(self.loop.operands[1].op_type, ImmOperandType) and
            not self.loop.operands[1].op_type.signed):
        raise RuntimeError('LOOP instruction from instructions file is '
                           'not the shape expected by the Loop generator.')

    # loopi expects operands: iterations, bodysize
    if not (len(self.loopi.operands) == 2 and
            isinstance(self.loopi.operands[0].op_type, ImmOperandType) and
            not self.loopi.operands[0].op_type.signed and
            isinstance(self.loopi.operands[1].op_type, ImmOperandType) and
            not self.loopi.operands[1].op_type.signed):
        raise RuntimeError('LOOPI instruction from instructions file is '
                           'not the shape expected by the Loop generator.')

    # Probability of picking LOOPI over LOOP; overwritten below from the
    # configured instruction weights unless both weights are zero, in
    # which case the generator is disabled entirely.
    self.loopi_prob = 0.5

    loop_weight = cfg.insn_weights.get('loop')
    loopi_weight = cfg.insn_weights.get('loopi')
    sum_weights = loop_weight + loopi_weight
    if sum_weights == 0:
        self.disabled = True
    else:
        self.loopi_prob = loopi_weight / sum_weights

    # Optional config caps on iteration count and tail length (None means
    # "no configured cap").
    self.cfg_max_iters = cfg.ranges.get_max('loop-iters')
    if self.cfg_max_iters is not None and self.cfg_max_iters < 1:
        raise RuntimeError(f'Invalid max-loop-iters value of '
                           f'{self.cfg_max_iters}: this must be '
                           f'at least 1.')

    self.cfg_max_tail = cfg.ranges.get_max('loop-tail-insns')
    if self.cfg_max_tail is not None and self.cfg_max_tail < 1:
        raise RuntimeError(f'Invalid max-loop-tail-insns value of '
                           f'{self.cfg_max_tail}: this must be '
                           f'at least 1.')
def _pick_loop_iterations(self,
                          max_iters: int,
                          model: Model) -> Optional[IterCount]:
    '''Pick the number of iterations for a LOOP loop

    To do this, we pick a register whose value we know and which doesn't
    give us a ridiculous number of iterations (checking model.fuel).
    max_iters is the maximum number of iterations possible, given how much
    fuel we have left.

    Returns a tuple (idx, iters, warp) where idx is the register index,
    iters is the number of iterations that will run and warp is either None
    or is a pair (from, to) giving a loop warp that should apply. iters
    will always be at most max_iters. Returns None if no known register
    holds a usable iteration count.
    '''
    # Most of the time, we want to generate a very small number of
    # iterations (since each iteration runs the same instructions, there's
    # not much point in generating lots of them). However, we don't want to
    # pick 1 iteration too often (since that's equivalent to no loop at
    # all). To implement this, we use a weighting of 1/(1 + abs(x - 2)) and
    # restrict to nonzero values less than 10.
    #
    # However we also have a coverage point that we'd like to hit where we
    # generate the maximal number of iterations. Thus we also allow the
    # case where a register is all ones and give it a weighting of 0.001:
    # we shouldn't hit this very often (and only need to see it once). In
    # practice, this will mostly come up because we decided to do a LOOP
    # and the only nonzero known value is 0xffffffff. We only allow this if
    # max_iters is at least 10.
    max_iters = min(max_iters, 10)
    if self.cfg_max_iters is not None:
        max_iters = min(max_iters, self.cfg_max_iters)
    allow_maximal_loop = max_iters >= 10

    # Iterate over the known registers, trying to pick a weight
    poss_pairs = []  # type: List[Tuple[int, int]]
    weights = []  # type: List[float]
    for idx, value in model.regs_with_known_vals('gpr'):
        weight = 0.0
        if 0 < value <= max_iters:
            weight = 1 / (1 + abs(value - 2))
        elif allow_maximal_loop and value == (1 << 32) - 1:
            weight = 0.001
        if weight:
            poss_pairs.append((idx, value))
            weights.append(weight)
    if not poss_pairs:
        return None
    # Weighted random choice of (register index, register value).
    idx, actual_iters = random.choices(poss_pairs, weights=weights)[0]

    # If the register holds more iterations than we can afford, only run
    # max_iters of them and use a warp to skip the middle ones.
    if actual_iters <= max_iters:
        warp = None
        iters = actual_iters
    else:
        # We will start '1 + lo' iterations before the warp (and finish
        # 'lo' of them). We'll then finish 'max_iters - lo' iterations
        # after the warp. Both '1 + lo' and 'max_iters - lo' must be
        # positive.
        lo = random.randint(0, max_iters - 1)
        hi = actual_iters - (max_iters - lo)
        assert 0 <= lo <= hi < actual_iters
        warp = (lo, hi)
        iters = max_iters
    return (idx, iters, warp)
def _pick_loopi_iterations(self,
                           max_iters: int,
                           op_type: OperandType,
                           model: Model) -> Optional[IterCount]:
    '''Pick the number of iterations for a LOOPI loop

    max_iters is the maximum number of iterations possible, given how much
    fuel we have left.

    Returns (enc_val, num_iters, warp) — the encoded and decoded number
    of iterations; warp is always None here. Returns None if the operand's
    encodable range leaves no valid count.
    '''
    assert isinstance(op_type, ImmOperandType)
    iters_range = op_type.get_op_val_range(model.pc)
    assert iters_range is not None
    iters_lo, iters_hi = iters_range

    # Constrain iters_hi if the max-loop-iters configuration value was set.
    if self.cfg_max_iters is not None:
        iters_hi = min(iters_hi, self.cfg_max_iters)
        if iters_hi < iters_lo:
            return None

    # Very occasionally, generate iters_hi iterations (the maximum number
    # representable) if we've got fuel for it. We don't do this often,
    # because the instruction sequence will end up just testing loop
    # handling and be very inefficient for testing anything else.
    if max_iters >= iters_hi and random.random() < 0.01:
        enc_val = op_type.op_val_to_enc_val(iters_hi, model.pc)
        # This should never fail, because iters_hi was encodable.
        assert enc_val is not None
        return (enc_val, iters_hi, None)

    # The rest of the time, we don't usually (95%) generate more than 3
    # iterations (because the instruction sequences are rather
    # repetitive!). Also, don't generate 0 iterations here, even though
    # it's encodable. That causes an error, so we'll do that in a separate
    # generator.
    if random.random() < 0.95:
        tgt_max_iters = min(max_iters, 3)
    else:
        tgt_max_iters = 10000

    ub = min(iters_hi, max_iters, tgt_max_iters)
    lb = max(iters_lo, 1)

    if ub < lb:
        return None

    # Otherwise, pick a value uniformly in [lb, ub]. No need for clever
    # weighting: in the usual case, there are just 3 possibilities!
    num_iters = random.randint(lb, ub)

    enc_val = op_type.op_val_to_enc_val(num_iters, model.pc)
    # This should never fail: the choice should have been in the encodable
    # range.
    assert enc_val is not None
    return (enc_val, num_iters, None)
def pick_iterations(self,
                    op_type: OperandType,
                    bodysize: int,
                    model: Model) -> Optional[IterCount]:
    '''Choose an iteration count for a loop with the given body size.

    Returns the encoded operand value (a register index for LOOP; an
    encoded immediate for LOOPI) together with the number of iterations
    that implies, or None if no suitable count exists.
    '''
    assert bodysize > 0

    # A single-instruction body costs one unit of fuel per iteration;
    # anything longer costs at least two. Two units are reserved up
    # front: one for the LOOP/LOOPI instruction itself and one for the
    # minimum-length single-instruction continuation afterwards.
    fuel_per_iter = 1 if bodysize == 1 else 2
    iter_budget = (model.fuel - 2) // fuel_per_iter

    if isinstance(op_type, ImmOperandType):
        return self._pick_loopi_iterations(iter_budget, op_type, model)

    assert isinstance(op_type, RegOperandType)
    assert op_type.reg_type == 'gpr'
    return self._pick_loop_iterations(iter_budget, model)
def _pick_loop_shape(self,
                     op0_type: OperandType,
                     op1_type: ImmOperandType,
                     space_here: int,
                     check: bool,
                     model: Model,
                     program: Program) -> Optional[Shape]:
    '''Pick the size of loop and number of iterations

    op0_type is the type of the first operand (either 'grs' for loop or
    'iterations' for loopi). op1_type is the type of the bodysize operand.

    space_here is the number of instructions' space available at the
    current position. If check is true, we're generating a genuine loop and
    should perform checks like making sure there's enough space to generate
    everything.

    Returns (bodysize, iter_count) on success, or None if no shape fits.
    '''
    # The first upper bound on bodysize is that we've got to have an empty
    # space for the loop body.
    #
    # Note: This doesn't allow us to generate a "loop" that encloses
    # previously generated code. So, for example, we couldn't do something
    # like
    #
    #    loopi  10, 3
    #    jal    x0, .+8
    #    jal    x0, .+100  // an isolated instruction that ran earlier
    #    addi   x0, 0      // end of loop
    #
    # Since we can generate jumps in the loop, we might "fill in
    # the middle" afterwards. However, we'll never make a loop that
    # "contains" instructions we executed before.
    #
    # To weaken this, we would need to just require that the end of the
    # loop is not yet taken. But that sounds a bit hard: let's not worry
    # about it for now.
    assert 3 <= space_here
    max_bodysize = space_here - 2

    # Another upper bound comes from program.space. If bodysize is 2 or
    # more, our body will need to generate at least 2 instructions (either
    # a straight line of length bodysize, or a jump from the start and then
    # a straight line instruction at the end). In this case, we need space
    # for at least 3 instructions (including the LOOP/LOOPI instruction
    # itself).
    #
    # We know that program.space is at least 2 (checked in gen()), but if
    # it's 2, we can only have a bodysize of 1.
    assert 2 <= program.space
    if program.space == 2:
        max_bodysize = min(max_bodysize, 1)

    bodysize_range = op1_type.get_op_val_range(model.pc)
    assert bodysize_range is not None
    bs_min, bs_max = bodysize_range
    if max_bodysize < max(1, bs_min):
        return None

    # Decide on the bodysize value. tail_pc is the address of the last
    # instruction in the loop body.
    bodysize = random.randint(max(1, bs_min), min(bs_max, max_bodysize))
    if check:
        # For a genuine loop, double-check there's room at the last body
        # instruction for that instruction plus the following trampoline.
        tail_pc = model.pc + 4 * bodysize
        assert program.get_insn_space_at(tail_pc) >= 2

    iters = self.pick_iterations(op0_type, bodysize, model)
    if iters is None:
        return None

    return (bodysize, iters)
def _gen_tail_insns(self,
                    num_insns: int,
                    model: Model,
                    program: Program) -> Optional[Tuple[List[ProgInsn],
                                                        Model]]:
    # Generate num_insns straight-line instructions for the loop tail,
    # returning them with the updated model (None on failure). Kept as
    # its own method, presumably as an override point for subclasses
    # (see _gen_pieces's note about subclasses) — TODO confirm.
    return self.sli_gen.gen_some(num_insns, model, program)
def _gen_tail(self,
              num_insns: int,
              model: Model,
              program: Program) -> Optional[Tuple[Snippet, Model]]:
    '''Generate the loop tail and insert it into program.

    Returns the tail as a ProgSnippet plus the updated model, or None if
    the straight-line generator fails.
    '''
    start_pc = model.pc
    gen_res = self._gen_tail_insns(num_insns, model, program)
    if gen_res is None:
        return None

    insns, model = gen_res
    assert len(insns) == num_insns

    prog_snippet = ProgSnippet(start_pc, insns)
    prog_snippet.insert_into_program(program)
    return (prog_snippet, model)
def _gen_body(self,
              bodysize: int,
              single_iter: bool,
              bogus_insn: ProgInsn,
              cont: GenCont,
              model: Model,
              program: Program) -> Optional[GenRet]:
    '''Generate the body of a loop

    The model is currently sitting at the start of the loop body.
    model.fuel is assumed to be the amount of fuel needed for a single
    iteration of the loop. If model.fuel is 1, bodysize must also be 1.

    bogus_insn is used as a placeholder to reserve the tail's addresses
    while the head is being generated. Returns (snippet, model) on
    success.

    This updates model and program unconditionally, trashing them if it
    returns None.
    '''
    assert 1 <= bodysize
    assert 1 <= model.fuel
    assert bodysize == 1 or model.fuel > 1

    # Address of the first instruction after the loop body.
    match_addr = model.pc + 4 * bodysize

    # If bodysize is at least 2, we need to generate at least 2
    # instructions (either a straight line of length bodysize, or a jump
    # from the start and then a straight line instruction at the end). We
    # should definitely have space for that.
    min_space_needed = 1 if bodysize == 1 else 2
    assert min_space_needed <= program.space

    # Pick the tail length. The tail is a sequence of straight-line
    # instructions that leads up to the end of the loop body. There must be
    # at least one (part of the spec for a loop).
    #
    # The maximum tail length depends on model.fuel: if bodysize is huge,
    # but fuel is just 2 (say), we can generate a body consisting of a jump
    # to the end and then a single instruction tail.
    #
    # This gives a bound on tail length of model.fuel - 1 (to allow an
    # instruction for the jump), unless bodysize <= model.fuel, in which
    # case we can generate a loop body that's just a tail.
    if bodysize <= model.fuel:
        max_tail_len = bodysize
    else:
        max_tail_len = min(bodysize, model.fuel - 1)
    if self.cfg_max_tail is not None:
        max_tail_len = min(max_tail_len, self.cfg_max_tail)

    # program.space gives another bound on the tail length. If the bodysize
    # is large enough that we'll need to jump to the tail, the tail can't
    # be more than program.space - 1 in length. If we don't need to
    # generate a jump instruction, it can be up to program.space.
    if bodysize <= program.space:
        max_tail_len = min(max_tail_len, program.space)
    else:
        assert 2 <= program.space
        max_tail_len = min(max_tail_len, program.space - 1)

    assert max_tail_len >= 1
    tail_len = random.randint(1, max_tail_len)

    # When we're generating the body of the loop, we mustn't update a
    # register whose value we relied on. For example, we might know that x3
    # contains 0x20 and use it as a load address, but then write 0x30 to
    # it. This will go badly when we come around again!
    #
    # To avoid the problem, we explicitly pick the registers that we are
    # going to leave unmolested and mark them as special in the model. We
    # then write None to all other known registers (to model the fact that
    # they have *some* architectural value; we just don't know what it is).
    #
    # Mark 50% of these registers as not-to-be-touched.
    #
    # This pass is skipped if we know we're doing exactly one iteration
    const_token = model.push_const()
    if not single_iter:
        for rt, regs in model.all_regs_with_known_vals().items():
            for reg_idx, _ in regs:
                if random.random() < 0.5:
                    model.mark_const(rt, reg_idx)
                else:
                    model.forget_value(rt, reg_idx)

    # Unconditionally mark x1 as constant, to avoid unbalanced push/pop
    # sequences in the loop.
    #
    # TODO: This means we don't use x1 inside loop bodies; we need to
    #       fix that.
    model.mark_const('gpr', 1)

    # If the tail isn't the entire loop, generate the first part of the
    # loop body. While doing so, we constrain fuel (to save enough for the
    # tail) and work with a program where we've inserted copies of a dummy
    # instruction over all the addresses that tail will use, plus the first
    # instruction after the loop.
    head_snippet = None
    assert tail_len <= bodysize
    tail_start = model.pc + 4 * (bodysize - tail_len)
    if tail_len < bodysize:
        assert tail_len < model.fuel
        model.fuel -= tail_len + 1
        if model.fuel > 0:
            # If there's some fuel for a head that isn't just a jump to the
            # tail, generate it here.
            prog0 = program.copy()
            bogus_tail_insns = [bogus_insn] * (tail_len + 1)
            prog0.add_insns(tail_start, bogus_tail_insns)

            head_snippet, model = cont(model, prog0, False)

            # Generation of the head might have failed, but that's actually
            # ok: we'll just start the loop body with the jump to the tail.
            # If it succeeded, add its instructions to program.
            if head_snippet is not None:
                head_snippet.insert_into_program(program)

        # Add one back to model.fuel for the jump instruction we might need
        # to get to the tail.
        model.fuel += 1

        if model.pc != tail_start:
            # If model hasn't ended up at tail_start, insert a jump to get
            # there. Use program rather than prog0 because prog0 has a
            # dummy instruction in the way. Note that jump_gen.gen_tgt will
            # insert the jump instruction that it generates into program.
            jump_ret = self.jump_gen.gen_tgt(model, program, tail_start)
            if jump_ret is None:
                return None

            jump_insn, jump_snippet, model = jump_ret
            assert model.pc == tail_start

            head_snippet = Snippet.cons_option(head_snippet, jump_snippet)

        # Add tail_len fuel back to model (undoing the rest of the
        # subtraction we did before we started generating the head)
        model.fuel += tail_len

        # We should always have generated at least something at this point
        # (because the original value of model.pc can't have been
        # tail_start if tail_len was less than bodysize).
        #
        # We've also updated the model as if it just ran the head and have
        # inserted head_snippet into program.
        assert head_snippet is not None

    # At this point, we've generated any head snippet and model.pc now
    # points at tail_start. Generate the remaining straight line
    # instructions that we need.
    assert model.pc == tail_start
    tail_ret = self._gen_tail(tail_len, model, program)
    if tail_ret is None:
        return None
    tail_snippet, model = tail_ret
    assert model.pc == match_addr

    snippet = Snippet.cons_option(head_snippet, tail_snippet)

    # Remove the const annotations that we added to the model
    model.pop_const(const_token)

    return (snippet, model)
def pick_loop_insn(self) -> Insn:
    '''Pick either LOOP or LOOPI'''
    if random.random() < self.loopi_prob:
        return self.loopi
    return self.loop
def _setup_body(self,
                hd_insn: ProgInsn,
                end_addr: int,
                model: Model,
                program: Program,
                has_warp: bool) -> Tuple[Model, Optional[LoopStack]]:
    '''Insert hd_insn into program and build a model for the loop body.

    The returned model has executed hd_insn and pushed end_addr onto its
    loop stack. When has_warp is true, the body must not contain nested
    loops (a loop/loopi as the first body instruction would break the
    warping, and there's no real benefit to allowing one elsewhere), so
    the model's loop stack is forced to look full; the untouched copy is
    handed back as the second return value so the caller can restore it.
    '''
    body_model = model.copy()
    body_model.update_for_insn(hd_insn)
    body_model.pc += 4
    body_model.loop_stack.push(end_addr)

    program.add_insns(model.pc, [hd_insn])

    if not has_warp:
        return (body_model, None)

    saved_stack = body_model.loop_stack.copy()
    body_model.loop_stack.force_full()
    return (body_model, saved_stack)
def _pick_head(self,
               space_here: int,
               check: bool,
               model: Model,
               program: Program) -> Optional[Tuple[ProgInsn, Shape]]:
    '''Pick LOOP or LOOPI plus a loop shape at the current PC.

    Returns (hd_insn, shape) where hd_insn is the head instruction with
    its operands already encoded and shape is (bodysize, iter_count) as
    returned by _pick_loop_shape. Returns None if no shape works.
    '''
    insn = self.pick_loop_insn()

    # Pick a loop count
    op0_type = insn.operands[0].op_type
    op1_type = insn.operands[1].op_type
    assert isinstance(op1_type, ImmOperandType)
    lshape = self._pick_loop_shape(op0_type, op1_type,
                                   space_here, check, model, program)
    if lshape is None:
        return None
    bodysize, iters = lshape

    # Extract the encoded operand value from iters. We ignore num_iters and
    # warp: they will be used in gen_pieces (returned in lshape), but we
    # don't need them here.
    iter_opval, num_iters, warp = iters

    # Generate the head instruction (which runs once, unconditionally) and
    # clone model and program to add it
    enc_bodysize = op1_type.op_val_to_enc_val(bodysize, model.pc)
    assert enc_bodysize is not None
    return (ProgInsn(insn, [iter_opval, enc_bodysize], None), lshape)
def _gen_pieces(self,
                cont: GenCont,
                model: Model,
                program: Program) -> Optional[Pieces]:
    '''Generate a loop and return its constituent pieces

    This is useful for subclasses that alter the generated loop after the
    fact.

    On success, returns (bodysize, hd_insn, body_snippet, model, warp).

    If this function succeeds, it may modify model (but it will not insert
    the generated instructions into program).
    '''
    # A loop or loopi sequence has a loop/loopi instruction, at least one
    # body instruction (the last of which must be a straight line
    # instruction) and then needs a following trampoline. That means we
    # need at least 3 instructions' space at the current PC.
    space_here = program.get_insn_space_at(model.pc)
    if space_here < 3:
        return None

    # The smallest possible loop takes 2 instructions (the loop instruction
    # plus the single-instruction loop body)
    if program.space < 2:
        return None

    # Don't blow the loop stack
    if model.loop_stack.maybe_full():
        return None

    ret = self._pick_head(space_here, True, model, program)
    if ret is None:
        return None
    hd_insn, lshape = ret
    bodysize, iters = lshape
    iter_opval, num_iters, warp = iters

    # The address of the final instruction in the loop
    end_addr = model.pc + 4 * bodysize

    body_program = program.copy()
    body_model, body_loop_stack = self._setup_body(hd_insn, end_addr,
                                                   model, body_program,
                                                   warp is not None)

    # Constrain fuel in body_model: subtract one (for the first instruction
    # after the loop) and then divide by the number of iterations. When we
    # picked num_iters, we made sure this was still positive.
    #
    # The minimum fuel to give is 1 if bodysize is 1 or 2 otherwise
    # (because the shortest possible sequence is a jump to the last
    # instruction, then a straight line instruction).
    min_fuel_per_iter = 1 if bodysize == 1 else 2
    max_fuel_per_iter = (body_model.fuel - 1) // num_iters
    # Guaranteed by pick_iterations, which capped the iteration count by
    # the fuel available per iteration.
    assert min_fuel_per_iter <= max_fuel_per_iter
    fuel_per_iter = random.randint(min_fuel_per_iter, max_fuel_per_iter)
    body_model.fuel = fuel_per_iter

    body_ret = self._gen_body(bodysize,
                              num_iters == 1,
                              hd_insn,
                              cont, body_model, body_program)
    if body_ret is None:
        return None

    body_snippet, body_model = body_ret

    # If we hacked the loop stack in _setup_body, the "correct" value of
    # the loop stack is in body_loop_stack. Put it back.
    if body_loop_stack is not None:
        body_model.loop_stack = body_loop_stack
    body_model.loop_stack.pop(end_addr)

    # Calculate the actual amount of fuel that we used
    body_fuel = fuel_per_iter - body_model.fuel
    assert body_fuel > 0
    fuel_afterwards = model.fuel - num_iters * body_fuel

    # Update model to take the loop body into account. For the loop stack
    # state, we just take whatever's in body_model. If everything was well
    # balanced, this will match anyway (and, if not, it's the state that
    # OTBN will be in at the end of the loop).
    #
    # For registers, if we know we have exactly one iteration through the
    # body, we can just take them from body_model. Otherwise, we merge the
    # two after "teleporting" model to the loop match address.
    assert body_model.pc == end_addr + 4
    if num_iters == 1:
        model = body_model
    else:
        model.update_for_insn(hd_insn)
        model.loop_stack = body_model.loop_stack
        model.pc = body_model.pc
        model.merge(body_model)

        # Fix up model.fuel: the merge function will have taken the minimum
        # between model and body_model, but we actually want it to be what
        # we computed before.
        model.fuel = fuel_afterwards

    return (bodysize, hd_insn, body_snippet, model, warp)
def gen(self,
        cont: GenCont,
        model: Model,
        program: Program) -> Optional[GenRet]:
    '''Generate a whole loop, inserting it into program.

    Returns (snippet, model) on success; None if no loop fits here.
    '''
    head_addr = model.pc

    pieces = self._gen_pieces(cont, model, program)
    if pieces is None:
        return None

    # pieces is (bodysize, hd_insn, body_snippet, model, warp); the body
    # size isn't needed to build the snippet.
    _bodysize, hd_insn, body_snippet, model, warp = pieces

    loop_snippet = LoopSnippet(head_addr, hd_insn, body_snippet, warp)
    loop_snippet.insert_into_program(program)
    return (loop_snippet, model)
| |
#!/usr/bin/env python
from collections import OrderedDict
try:
from cdecimal import Decimal
except ImportError: # pragma: no cover
from decimal import Decimal
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import shutil
import json
from agate import Table, TableSet
from agate.aggregations import *
from agate.data_types import *
from agate.computations import Formula
from agate.testcase import AgateTestCase
class TestTableSet(AgateTestCase):
def setUp(self):
    """Build three small tables sharing a (letter, number) schema."""
    self.table1 = (('a', 1), ('a', 3), ('b', 2))
    self.table2 = (('b', 0), ('a', 2), ('c', 5))
    self.table3 = (('a', 1), ('a', 2), ('c', 3))

    self.text_type = Text()
    self.number_type = Number()

    self.column_names = ['letter', 'number']
    self.column_types = [self.text_type, self.number_type]

    self.tables = OrderedDict(
        (name, Table(rows, self.column_names, self.column_types))
        for name, rows in (('table1', self.table1),
                           ('table2', self.table2),
                           ('table3', self.table3)))
def test_create_tableset(self):
    """A TableSet built from three tables reports a length of three."""
    tset = TableSet(self.tables.values(), self.tables.keys())
    self.assertEqual(len(tset), 3)
def test_create_tableset_mismatched_column_names(self):
    """Differing column names across member tables raise ValueError."""
    tables = OrderedDict(self.tables)
    tables['table3'] = Table(self.table3, ['foo', 'bar'], self.column_types)

    with self.assertRaises(ValueError):
        TableSet(tables.values(), tables.keys())
def test_create_tableset_mismatched_column_types(self):
    """Differing column types across member tables raise ValueError."""
    tables = OrderedDict(self.tables)
    tables['table3'] = Table(
        self.table3, self.column_names, [self.text_type, self.text_type])

    with self.assertRaises(ValueError):
        TableSet(tables.values(), tables.keys())
def test_from_csv(self):
tableset1 = TableSet(self.tables.values(), self.tables.keys())
tableset2 = TableSet.from_csv('examples/tableset', self.column_names)
self.assertSequenceEqual(tableset1.column_names, tableset2.column_names)
self.assertSequenceEqual([type(t) for t in tableset1.column_types], [type(t) for t in tableset2.column_types])
self.assertEqual(len(tableset1), len(tableset2))
for name in ['table1', 'table2', 'table3']:
self.assertEqual(len(tableset1[name].columns), len(tableset2[name].columns))
self.assertEqual(len(tableset1[name].rows), len(tableset2[name].rows))
self.assertSequenceEqual(tableset1[name].rows[0], tableset2[name].rows[0])
self.assertSequenceEqual(tableset1[name].rows[1], tableset2[name].rows[1])
self.assertSequenceEqual(tableset1[name].rows[2], tableset2[name].rows[2])
def test_tableset_from_csv_invalid_dir(self):
with self.assertRaises(IOError):
TableSet.from_csv('quack')
def test_from_csv_column_types_not_equal(self):
with self.assertRaises(ValueError):
TableSet.from_csv('examples/tableset/type_error')
def test_to_csv(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
tableset.to_csv('.test-tableset')
for name in ['table1', 'table2', 'table3']:
with open('.test-tableset/%s.csv' % name) as f:
contents1 = f.read()
with open('examples/tableset/%s.csv' % name) as f:
contents2 = f.read()
self.assertEqual(contents1, contents2)
shutil.rmtree('.test-tableset')
def test_from_json_dir(self):
tableset1 = TableSet(self.tables.values(), self.tables.keys())
tableset2 = TableSet.from_json('examples/tableset')
self.assertSequenceEqual(tableset1.column_names, tableset2.column_names)
self.assertSequenceEqual([type(t) for t in tableset1.column_types], [type(t) for t in tableset2.column_types])
self.assertEqual(len(tableset1), len(tableset2))
for name in ['table1', 'table2', 'table3']:
self.assertEqual(len(tableset1[name].columns), len(tableset2[name].columns))
self.assertEqual(len(tableset1[name].rows), len(tableset2[name].rows))
self.assertSequenceEqual(tableset1[name].rows[0], tableset2[name].rows[0])
self.assertSequenceEqual(tableset1[name].rows[1], tableset2[name].rows[1])
self.assertSequenceEqual(tableset1[name].rows[2], tableset2[name].rows[2])
def test_from_json_file(self):
tableset1 = TableSet(self.tables.values(), self.tables.keys())
tableset2 = TableSet.from_json('examples/test_tableset.json')
with open('examples/test_tableset.json') as f:
filelike = StringIO(f.read())
tableset3 = TableSet.from_json(filelike)
self.assertSequenceEqual(tableset1.column_names, tableset2.column_names, tableset3.column_names)
self.assertSequenceEqual([type(t) for t in tableset1.column_types], [type(t) for t in tableset2.column_types], [type(t) for t in tableset3.column_types])
self.assertEqual(len(tableset1), len(tableset2), len(tableset3))
for name in ['table1', 'table2', 'table3']:
self.assertEqual(len(tableset1[name].columns), len(tableset2[name].columns), len(tableset3[name].columns))
self.assertEqual(len(tableset1[name].rows), len(tableset2[name].rows), len(tableset3[name].rows))
self.assertSequenceEqual(tableset1[name].rows[0], tableset2[name].rows[0], tableset3[name].rows[0])
self.assertSequenceEqual(tableset1[name].rows[1], tableset2[name].rows[1], tableset3[name].rows[1])
self.assertSequenceEqual(tableset1[name].rows[2], tableset2[name].rows[2], tableset3[name].rows[2])
def test_from_json_false_path(self):
with self.assertRaises(IOError):
tableset1 = TableSet.from_json('notapath') # noqa
def test_to_json(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
tableset.to_json('.test-tableset')
for name in ['table1', 'table2', 'table3']:
with open('.test-tableset/%s.json' % name) as f:
contents1 = json.load(f)
with open('examples/tableset/%s.json' % name) as f:
contents2 = json.load(f)
self.assertEqual(contents1, contents2)
shutil.rmtree('.test-tableset')
def test_to_nested_json(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
output = StringIO()
tableset.to_json(output, nested=True)
tableset.to_json('.test-tableset/tableset.json', nested=True)
contents1 = json.loads(output.getvalue())
with open('.test-tableset/tableset.json') as f:
contents2 = json.load(f)
with open('examples/test_tableset.json') as f:
contents3 = json.load(f)
self.assertEqual(contents1, contents3)
self.assertEqual(contents2, contents3)
shutil.rmtree('.test-tableset')
def test_get_column_types(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
self.assertSequenceEqual(tableset.column_types, self.column_types)
def test_get_column_names(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
self.assertSequenceEqual(tableset.column_names, self.column_names)
def test_merge(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
table = tableset.merge()
self.assertColumnNames(table, ['group', 'letter', 'number'])
self.assertColumnTypes(table, [Text, Text, Number])
self.assertEqual(len(table.rows), 9)
self.assertSequenceEqual(table.rows[0], ['table1', 'a', 1])
self.assertSequenceEqual(table.rows[8], ['table3', 'c', 3])
def test_merge_key_name(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='foo')
table = tableset.merge()
self.assertColumnNames(table, ['foo', 'letter', 'number'])
self.assertColumnTypes(table, [Text, Text, Number])
def test_merge_groups(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='foo')
table = tableset.merge(groups=['red', 'blue', 'green'], group_name='color_code')
self.assertColumnNames(table, ['color_code', 'letter', 'number'])
self.assertColumnTypes(table, [Text, Text, Number])
self.assertEqual(len(table.rows), 9)
self.assertSequenceEqual(table.rows[0], ['red', 'a', 1])
self.assertSequenceEqual(table.rows[8], ['green', 'c', 3])
def test_merge_groups_invalid_length(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(ValueError):
table = tableset.merge(groups=['red', 'blue'], group_name='color_code') # noqa
def test_merge_groups_invalid_type(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(ValueError):
table = tableset.merge(groups='invalid', group_name='color_code') # noqa
def test_compute(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_tableset = tableset.compute([
('new_column', Formula(self.text_type, lambda r: '%(letter)s-%(number)i' % r))
])
new_table = new_tableset['table1']
self.assertColumnNames(new_table, ('letter', 'number', 'new_column',))
self.assertColumnTypes(new_table, (Text, Number, Text))
self.assertRows(new_table, [
('a', 1, 'a-1'),
('a', 3, 'a-3'),
('b', 2, 'b-2')
])
new_table = new_tableset['table2']
self.assertRows(new_table, [
('b', 0, 'b-0'),
('a', 2, 'a-2'),
('c', 5, 'c-5')
])
new_table = new_tableset['table3']
self.assertSequenceEqual(new_table.rows[0], ('a', 1, 'a-1'))
self.assertSequenceEqual(new_table.rows[1], ('a', 2, 'a-2'))
self.assertSequenceEqual(new_table.rows[2], ('c', 3, 'c-3'))
def test_select(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_tableset = tableset.select(['number'])
for name, new_table in new_tableset.items():
self.assertColumnNames(new_table, ('number',))
self.assertColumnTypes(new_table, (Number,))
def test_print_structure(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
output = StringIO()
tableset.print_structure(output=output)
lines = output.getvalue().strip().split('\n')
self.assertEqual(len(lines), 7)
def test_print_structure_row_limit(self):
tables = self.tables
for i in range(25):
tables[str(i)] = self.tables['table1']
tableset = TableSet(tables.values(), tables.keys())
output = StringIO()
tableset.print_structure(output=output)
lines = output.getvalue().strip().split('\n')
self.assertEqual(len(lines), 24)
def test_aggregate_key_name(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
new_table = tableset.aggregate([
('count', Count())
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('test', 'count'))
self.assertColumnTypes(new_table, [Text, Number])
def test_aggregate_key_type(self):
tables = OrderedDict([
(1, Table(self.table1, self.column_names, self.column_types)),
(2, Table(self.table2, self.column_names, self.column_types)),
(3, Table(self.table3, self.column_names, self.column_types))
])
tableset = TableSet(tables.values(), tables.keys(), key_name='test', key_type=self.number_type)
new_table = tableset.aggregate([
('count', Count())
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('test', 'count'))
self.assertColumnTypes(new_table, [Number, Number])
def test_aggregate_row_names(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
new_table = tableset.aggregate([
('count', Count())
])
self.assertRowNames(new_table, ['table1', 'table2', 'table3'])
def test_aggregate_sum(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('number_sum', Sum('number'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'number_sum'))
self.assertColumnTypes(new_table, [Text, Number, Number])
self.assertRows(new_table, [
('table1', 3, 6),
('table2', 3, 7),
('table3', 3, 6)
])
def test_aggregate_min(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('number_min', Min('number'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'number_min'))
self.assertColumnTypes(new_table, [Text, Number, Number])
self.assertRows(new_table, [
('table1', 3, 1),
('table2', 3, 0),
('table3', 3, 1)
])
def test_aggregate_two_ops(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('number_sum', Sum('number')),
('number_mean', Mean('number'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'number_sum', 'number_mean'))
self.assertColumnTypes(new_table, [Text, Number, Number, Number])
self.assertRows(new_table, [
('table1', 3, 6, 2),
('table2', 3, 7, Decimal(7) / 3),
('table3', 3, 6, 2)
])
def test_aggregate_max_length(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('letter_max_length', MaxLength('letter'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'letter_max_length'))
self.assertColumnTypes(new_table, [Text, Number, Number])
self.assertRows(new_table, [
('table1', 3, 1),
('table2', 3, 1),
('table3', 3, 1)
])
def test_aggregate_sum_invalid(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(DataTypeError):
tableset.aggregate([('letter_sum', Sum('letter'))])
def test_aggregeate_bad_column(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(KeyError):
tableset.aggregate([('one_sum', Sum('one'))])
with self.assertRaises(KeyError):
tableset.aggregate([('bad_sum', Sum('bad'))])
def test_nested(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
nested = tableset.group_by('letter')
self.assertIsInstance(nested, TableSet)
self.assertEqual(len(nested), 3)
self.assertSequenceEqual(nested._column_names, ('letter', 'number'))
self.assertSequenceEqual(nested._column_types, (self.text_type, self.number_type))
self.assertIsInstance(nested['table1'], TableSet)
self.assertEqual(len(nested['table1']), 2)
self.assertSequenceEqual(nested['table1']._column_names, ('letter', 'number'))
self.assertSequenceEqual(nested['table1']._column_types, (self.text_type, self.number_type))
self.assertIsInstance(nested['table1']['a'], Table)
self.assertEqual(len(nested['table1']['a'].columns), 2)
self.assertEqual(len(nested['table1']['a'].rows), 2)
def test_nested_aggregation(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
nested = tableset.group_by('letter')
results = nested.aggregate([
('count', Count()),
('number_sum', Sum('number'))
])
self.assertIsInstance(results, Table)
self.assertColumnNames(results, ('test', 'letter', 'count', 'number_sum'))
self.assertColumnTypes(results, (Text, Text, Number, Number))
self.assertRows(results, [
('table1', 'a', 2, 4),
('table1', 'b', 1, 2),
('table2', 'b', 1, 0),
('table2', 'a', 1, 2),
('table2', 'c', 1, 5),
('table3', 'a', 2, 3),
('table3', 'c', 1, 3)
])
def test_nested_aggregate_row_names(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
nested = tableset.group_by('letter')
results = nested.aggregate([
('count', Count()),
('number_sum', Sum('number'))
])
self.assertRowNames(results, [
('table1', 'a'),
('table1', 'b'),
('table2', 'b'),
('table2', 'a'),
('table2', 'c'),
('table3', 'a'),
('table3', 'c'),
])
self.assertSequenceEqual(results.rows[('table1', 'a')], ('table1', 'a', 2, 4))
self.assertSequenceEqual(results.rows[('table2', 'c')], ('table2', 'c', 1, 5))
def test_proxy_local(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='foo')
self.assertEqual(tableset._key_name, 'foo')
def test_proxy_maintains_key(self):
number_type = Number()
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='foo', key_type=number_type)
self.assertEqual(tableset.key_name, 'foo')
self.assertEqual(tableset.key_type, number_type)
new_tableset = tableset.select(['number'])
self.assertEqual(new_tableset.key_name, 'foo')
self.assertEqual(new_tableset.key_type, number_type)
def test_proxy_invalid(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(AttributeError):
tableset.foo()
| |
# -*- coding: utf-8 -*-
"""
Generate metadata and bag for a resource from Django
"""
import os
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.hydroshare import hs_requests
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files
from hs_core.tasks import create_bag_by_irods
from django_irods.icommands import SessionException
def _fetch_bag(rid, options, stop_after_first_chunk=False):
    """Fetch the bag for resource `rid` over HTTPS and stream it to disk.

    Uses the login/password in `options`, prints the request URI, HTTP
    status and redirect chain, and writes the body to
    'tmp/check_bag_block'.  When `stop_after_first_chunk` is True only the
    first chunk is written -- enough to prove the bag "opens" without
    downloading all of it.
    """
    server = getattr(settings, 'FQDN_OR_IP', 'www.hydroshare.org')
    uri = "https://{}/hsapi/resource/{}/".format(server, rid)
    print("download uri is {}".format(uri))
    r = hs_requests.get(uri, verify=False, stream=True,
                        auth=requests.auth.HTTPBasicAuth(options['login'],
                                                         options['password']))
    print("download return status is {}".format(str(r.status_code)))
    print("redirects:")
    for thing in r.history:
        print("...url: {}".format(thing.url))
    filename = 'tmp/check_bag_block'
    with open(filename, 'wb') as fd:
        for chunk in r.iter_content(chunk_size=128):
            fd.write(chunk)
            if stop_after_first_chunk:
                break


def check_bag(rid, options):
    """Report on -- and optionally reset, regenerate, or fetch -- the
    metadata files and bag of the resource with short id `rid`.

    `options` is the argparse-produced dict from the management command:
    'reset', 'reset_metadata', 'reset_bag' delete state; 'generate',
    'generate_metadata', 'generate_bag' rebuild it ('if_needed' limits
    rebuilding to stale/missing artifacts); 'download_bag'/'open_bag'
    retrieve the bag over HTTP using 'login'/'password'.  All outcomes are
    reported via print().
    """
    requests.packages.urllib3.disable_warnings()
    try:
        resource = BaseResource.objects.get(short_id=rid)
        istorage = resource.get_irods_storage()
        root_exists = istorage.exists(resource.root_path)
        if root_exists:
            # Print status of the metadata/bag system.
            scimeta_path = os.path.join(resource.root_path, 'data',
                                        'resourcemetadata.xml')
            scimeta_exists = istorage.exists(scimeta_path)
            if scimeta_exists:
                print("resource metadata {} found".format(scimeta_path))
            else:
                print("resource metadata {} NOT FOUND".format(scimeta_path))
            resmap_path = os.path.join(resource.root_path, 'data', 'resourcemap.xml')
            resmap_exists = istorage.exists(resmap_path)
            if resmap_exists:
                print("resource map {} found".format(resmap_path))
            else:
                print("resource map {} NOT FOUND".format(resmap_path))
            bag_exists = istorage.exists(resource.bag_path)
            if bag_exists:
                print("bag {} found".format(resource.bag_path))
            else:
                print("bag {} NOT FOUND".format(resource.bag_path))
            dirty = resource.getAVU('metadata_dirty')
            print("{}.metadata_dirty is {}".format(rid, str(dirty)))
            modified = resource.getAVU('bag_modified')
            print("{}.bag_modified is {}".format(rid, str(modified)))
            # NOTE(review): the 'dirty'/'modified'/*_exists values read above
            # are not refreshed after the reset branches below mutate state;
            # the 'if_needed' checks therefore use pre-reset values.
            if options['reset']:  # reset all data to pristine
                resource.setAVU('metadata_dirty', 'true')
                print("{}.metadata_dirty set to true".format(rid))
                try:
                    istorage.delete(resource.scimeta_path)
                    print("{} deleted".format(resource.scimeta_path))
                except SessionException as ex:
                    print("{} delete failed: {}"
                          .format(resource.scimeta_path,
                                  ex.stderr))
                try:
                    istorage.delete(resource.resmap_path)
                    print("{} deleted".format(resource.resmap_path))
                except SessionException as ex:
                    print("{} delete failed: {}"
                          .format(resource.resmap_path,
                                  ex.stderr))
                resource.setAVU('bag_modified', 'true')
                print("{}.bag_modified set to true".format(rid))
                try:
                    istorage.delete(resource.bag_path)
                    print("{} deleted".format(resource.bag_path))
                except SessionException as ex:
                    print("{} delete failed: {}"
                          .format(resource.bag_path,
                                  ex.stderr))
            if options['reset_metadata']:
                resource.setAVU('metadata_dirty', 'true')
                print("{}.metadata_dirty set to true".format(rid))
                try:
                    istorage.delete(resource.scimeta_path)
                    print("{} deleted".format(resource.scimeta_path))
                except SessionException as ex:
                    print("delete of {} failed: {}"
                          .format(resource.scimeta_path,
                                  ex.stderr))
                try:
                    istorage.delete(resource.resmap_path)
                    print("{} deleted".format(resource.resmap_path))
                except SessionException as ex:
                    print("{} delete failed: {}"
                          .format(resource.resmap_path,
                                  ex.stderr))
            if options['reset_bag']:
                resource.setAVU('bag_modified', 'true')
                print("{}.bag_modified set to true".format(rid))
                try:
                    istorage.delete(resource.bag_path)
                    print("{} deleted".format(resource.bag_path))
                except SessionException as ex:
                    print("{} delete failed: {}"
                          .format(resource.bag_path,
                                  ex.stderr))
            if options['generate']:  # generate usable bag
                if not options['if_needed'] or dirty or not scimeta_exists or not resmap_exists:
                    try:
                        create_bag_metadata_files(resource)
                    except ValueError as e:
                        print(("{}: value error encountered: {}".format(rid, str(e))))
                        return
                    print("{} metadata generated from Django".format(rid))
                    resource.setAVU('metadata_dirty', 'false')
                    resource.setAVU('bag_modified', 'true')
                    print("{}.metadata_dirty set to false".format(rid))
                if not options['if_needed'] or modified or not bag_exists:
                    create_bag_by_irods(rid)
                    print("{} bag generated from iRODs".format(rid))
                    resource.setAVU('bag_modified', 'false')
                    print("{}.bag_modified set to false".format(rid))
            if options['generate_metadata']:
                if not options['if_needed'] or dirty or not scimeta_exists or not resmap_exists:
                    try:
                        create_bag_metadata_files(resource)
                    except ValueError as e:
                        print(("{}: value error encountered: {}".format(rid, str(e))))
                        return
                    print("{}: metadata generated from Django".format(rid))
                    resource.setAVU('metadata_dirty', 'false')
                    print("{}.metadata_dirty set to false".format(rid))
                    resource.setAVU('bag_modified', 'true')
                    # BUG FIX: message previously said "set to false" while
                    # the AVU was actually being set to 'true'.
                    print("{}.bag_modified set to true".format(rid))
            if options['generate_bag']:
                if not options['if_needed'] or modified or not bag_exists:
                    create_bag_by_irods(rid)
                    print("{}: bag generated from iRODs".format(rid))
                    resource.setAVU('bag_modified', 'false')
                    print("{}.bag_modified set to false".format(rid))
            if options['download_bag']:
                if options['password']:
                    _fetch_bag(rid, options)
                else:
                    print("cannot download bag without username and password.")
            if options['open_bag']:
                if options['password']:
                    _fetch_bag(rid, options, stop_after_first_chunk=True)
                else:
                    print("cannot open bag without username and password.")
        else:
            print("Resource with id {} does not exist in iRODS".format(rid))
    except BaseResource.DoesNotExist:
        print("Resource with id {} NOT FOUND in Django".format(rid))
class Command(BaseCommand):
    """Management command: create metadata files and bag for a resource."""
    help = "Create metadata files and bag for a resource."

    # (flag-name, help-text) pairs for the boolean switches.  Each becomes
    # a --<name> store_true option with dest=<name>, so options[<name>]
    # works exactly as before.
    _BOOL_FLAGS = (
        ('reset', 'delete metadata and bag and start over'),
        ('reset_metadata', 'delete metadata files and start over'),
        ('reset_bag', 'delete bag and start over'),
        ('generate', 'force generation of metadata and bag'),
        ('generate_metadata', 'force generation of metadata and bag'),
        ('generate_bag', 'force generation of metadata and bag'),
        ('if_needed', 'generate only if not present'),
        ('download_bag', 'try downloading the bag'),
        ('open_bag', 'try opening the bag in http without downloading'),
    )

    def add_arguments(self, parser):
        """Register positional resource ids plus the optional switches."""
        # A list of resource id's, or none to check all resources.
        parser.add_argument('resource_ids', nargs='*', type=str)
        # Named (optional) boolean arguments, registered in declaration
        # order so --help output matches the previous layout.
        for flag_name, flag_help in self._BOOL_FLAGS:
            parser.add_argument(
                '--' + flag_name,
                action='store_true',  # True for presence, False for absence
                dest=flag_name,
                help=flag_help
            )
        parser.add_argument(
            '--login',
            default='admin',
            dest='login',  # value is options['login']
            help='HydroShare login name'
        )
        parser.add_argument(
            '--password',
            default=None,
            dest='password',  # value is options['password']
            help='HydroShare password'
        )

    def handle(self, *args, **options):
        """Check the named resources, or every resource when none given."""
        requested = options['resource_ids']
        if requested:  # an array of resource short_id to check
            for rid in requested:
                check_bag(rid, options)
        else:
            for resource in BaseResource.objects.all():
                check_bag(resource.short_id, options)
| |
'''Settings
========
.. versionadded:: 1.0.7
This module is a complete and extensible framework for building a
Settings interface in your application. By default the interface uses
a :class:`SettingsWithSpinner`, which consists of a
:class:`~kivy.uix.spinner.Spinner` (top) to switch between individual
settings panels (bottom). See :ref:`differentlayouts` for some
alternatives.
.. image:: images/settingswithspinner_kivy.jpg
:align: center
:class:`SettingsPanel` represents a group of configurable options. The
:data:`SettingsPanel.title` property is used by :class:`Settings` when a panel
is added - it determines the name of the sidebar button. SettingsPanel controls
a :class:`~kivy.config.ConfigParser` instance.
The panel can be automatically constructed from a JSON definition file: you
describe the settings you want and corresponding sections/keys in the
ConfigParser instance... and you're done!
Settings are also integrated with the :class:`~kivy.app.App` class. Use
:func:`Settings.add_kivy_panel` to configure the Kivy core settings in a panel.
.. _settings_json:
Create panel from JSON
----------------------
To create a panel from a JSON-file, you need two things:
* a :class:`~kivy.config.ConfigParser` instance with default values
* a JSON file
.. warning::
The :class:`kivy.config.ConfigParser` is required. You cannot use the
default ConfigParser from Python libraries.
You must create and handle the :class:`~kivy.config.ConfigParser`
object. SettingsPanel will read the values from the associated
ConfigParser instance. Make sure you have default values for all sections/keys
in your JSON file!
The JSON file contains structured information to describe the available
settings. Here is an example::
[
{
"type": "title",
"title": "Windows"
},
{
"type": "bool",
"title": "Fullscreen",
"desc": "Set the window in windowed or fullscreen",
"section": "graphics",
"key": "fullscreen",
"true": "auto"
}
]
Each element in the root list represents a setting that the user can configure.
Only the "type" key is mandatory: an instance of the associated class will be
created and used for the setting - other keys are assigned to corresponding
properties of that class.
============== =================================================
Type Associated class
-------------- -------------------------------------------------
title :class:`SettingTitle`
bool :class:`SettingBoolean`
numeric :class:`SettingNumeric`
options :class:`SettingOptions`
string :class:`SettingString`
path :class:`SettingPath` (new from 1.1.0)
============== =================================================
In the JSON example above, the first element is of type "title". It will create
a new instance of :class:`SettingTitle` and apply the rest of the key/value
pairs to the properties of that class, i.e., "title": "Windows" sets the
:data:`SettingTitle.title` property to "Windows".
To load the JSON example to a :class:`Settings` instance, use the
:meth:`Settings.add_json_panel` method. It will automatically instantiate
:class:`SettingsPanel` and add it to :class:`Settings`::
from kivy.config import ConfigParser
config = ConfigParser()
config.read('myconfig.ini')
s = Settings()
s.add_json_panel('My custom panel', config, 'settings_custom.json')
s.add_json_panel('Another panel', config, 'settings_test2.json')
# then use the s as a widget...
.. _differentlayouts:
Different panel layouts
-----------------------
A kivy :class:`~kivy.app.App` can automatically create and display a
:class:`Settings` instance. See the :attr:`~kivy.app.App.settings_cls`
documentation for details on how to choose which settings class to
display.
Several pre-built settings widgets are available. All except
:class:`SettingsWithNoMenu` include close buttons triggering the
on_close event.
- :class:`Settings`: Displays settings with a sidebar at the left to
switch between json panels. This is the default behaviour.
- :class:`SettingsWithSidebar`: A trivial subclass of
:class:`Settings`.
- :class:`SettingsWithSpinner`: Displays settings with a spinner at
the top, which can be used to switch between json panels. Uses
:class:`InterfaceWithSpinner` as the
:data:`~Settings.interface_cls`.
- :class:`SettingsWithTabbedPanel`: Displays json panels as individual
tabs in a :class:`~kivy.uix.tabbedpanel.TabbedPanel`. Uses
:class:`InterfaceWithTabbedPanel` as the :data:`~Settings.interface_cls`.
- :class:`SettingsWithNoMenu`: Displays a single json panel, with no
way to switch to other panels and no close button. This makes it
impossible for the user to exit unless
:meth:`~kivy.app.App.close_settings` is overridden with a different
close trigger! Uses :class:`InterfaceWithNoMenu` as the
:data:`~Settings.interface_cls`.
You can construct your own settings panels with any layout you choose
by setting :data:`Settings.interface_cls`. This should be a widget
that displays a json settings panel with some way to switch between
panels. An instance will be automatically created by :class:`Settings`.
Interface widgets may be anything you like, but *must* have a method
add_panel that receives newly created json settings panels for the
interface to display. See the documentation for
:class:`InterfaceWithSidebar` for more information. They may
optionally dispatch an on_close event, for instance if a close button
is clicked, which is used by :class:`Settings` to trigger its own
on_close event.
'''
__all__ = ('Settings', 'SettingsPanel', 'SettingItem', 'SettingString',
'SettingPath', 'SettingBoolean', 'SettingNumeric',
'SettingOptions', 'SettingsWithSidebar', 'SettingsWithSpinner',
'SettingsWithTabbedPanel', 'SettingsWithNoMenu',
'InterfaceWithSidebar', 'ContentPanel')
import json
import os
from kivy.metrics import dp
from kivy.config import ConfigParser
from kivy.animation import Animation
from kivy.compat import string_types, text_type
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.scrollview import ScrollView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty, ListProperty, \
BooleanProperty, NumericProperty, DictProperty
class SettingSpacer(Widget):
    # Internal class, not documented.  (Name suggests a visual spacer
    # between setting rows; its appearance is defined outside this file.)
    pass
class SettingItem(FloatLayout):
'''Base class for individual settings (within a panel). This class cannot
be used directly; it is used for implementing the other setting classes.
It builds a row with title/description (left) and setting control (right).
Look at :class:`SettingBoolean`, :class:`SettingNumeric` and
:class:`SettingOptions` for usage example.
:Events:
`on_release`
Fired when the item is touched then released
'''
title = StringProperty('<No title set>')
'''Title of the setting, default to '<No title set>'.
:data:`title` is a :class:`~kivy.properties.StringProperty`, default to
'<No title set>'.
'''
desc = StringProperty(None, allownone=True)
'''Description of the setting, rendered on the line below title.
:data:`desc` is a :class:`~kivy.properties.StringProperty`, default to
None.
'''
disabled = BooleanProperty(False)
'''Indicate if this setting is disabled. If True, all touches on the
setting item will be discarded.
:data:`disabled` is a :class:`~kivy.properties.BooleanProperty`, default to
False.
'''
section = StringProperty(None)
'''Section of the token inside the :class:`~kivy.config.ConfigParser`
instance.
:data:`section` is a :class:`~kivy.properties.StringProperty`, default to
None.
'''
key = StringProperty(None)
'''Key of the token inside the :data:`section` in the
:class:`~kivy.config.ConfigParser` instance.
:data:`key` is a :class:`~kivy.properties.StringProperty`, default to None.
'''
value = ObjectProperty(None)
'''Value of the token, according to the :class:`~kivy.config.ConfigParser`
instance. Any change to the value will trigger a
:meth:`Settings.on_config_change` event.
:data:`value` is a :class:`~kivy.properties.ObjectProperty`, default to
None.
'''
panel = ObjectProperty(None)
'''(internal) Reference to the SettingsPanel with this setting. You don't
need to use it.
:data:`panel` is a :class:`~kivy.properties.ObjectProperty`, default to
None
'''
content = ObjectProperty(None)
'''(internal) Reference to the widget that contains the real setting.
As soon as the content object is set, any further call to add_widget will
call the content.add_widget. This is automatically set.
:data:`content` is a :class:`~kivy.properties.ObjectProperty`, default to
None.
'''
selected_alpha = NumericProperty(0)
'''(internal) Float value from 0 to 1, used to animate the background when
the user touches the item.
:data:`selected_alpha` is a :class:`~kivy.properties.NumericProperty`,
default to 0.
'''
__events__ = ('on_release', )
def __init__(self, **kwargs):
super(SettingItem, self).__init__(**kwargs)
self.value = self.panel.get_value(self.section, self.key)
def add_widget(self, *largs):
if self.content is None:
return super(SettingItem, self).add_widget(*largs)
return self.content.add_widget(*largs)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if self.disabled:
return
touch.grab(self)
self.selected_alpha = 1
return super(SettingItem, self).on_touch_down(touch)
def on_touch_up(self, touch):
    # Only act on touches we grabbed in on_touch_down.
    if touch.grab_current is self:
        touch.ungrab(self)
        self.dispatch('on_release')
        # Fade the selection highlight back out.
        Animation(selected_alpha=0, d=.25, t='out_quad').start(self)
        return True
    return super(SettingItem, self).on_touch_up(touch)
def on_release(self):
    # Default handler for the 'on_release' event; subclasses bind to or
    # override this (e.g. to open their editing popup).
    pass
def on_value(self, instance, value):
    '''Persist any change of :data:`value` back into the ConfigParser
    through the owning panel.'''
    if not (self.section and self.key):
        return
    # The ConfigParser stores strings only; coerce everything else.
    stored = value if isinstance(value, string_types) else str(value)
    self.panel.set_value(self.section, self.key, stored)
class SettingBoolean(SettingItem):
    '''A boolean setting built on :class:`SettingItem`, rendered as a
    :class:`~kivy.uix.switch.Switch`. The strings stored in the config
    default to '0' and '1'; customize them via :data:`values`.
    '''

    values = ListProperty(['0', '1'])
    '''The two strings representing the off/on states of the setting.
    For a ConfigParser that uses "yes" / "no"::

        SettingBoolean(..., values=['no', 'yes'])

    .. warning::

        At least two values are required: index 0 is taken as False,
        index 1 as True.

    :data:`values` is a :class:`~kivy.properties.ListProperty`, default to
    ['0', '1'].
    '''
class SettingString(SettingItem):
    '''Implementation of a string setting on top of :class:`SettingItem`.
    It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
    clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.Textinput` so the user can enter a custom
    value.
    '''

    popup = ObjectProperty(None, allownone=True)
    '''(internal) Used to store the current popup when it's shown.

    :data:`popup` is a :class:`~kivy.properties.ObjectProperty`, default to
    None.
    '''

    textinput = ObjectProperty(None)
    '''(internal) Used to store the current textinput from the popup, and
    to listen for changes.

    :data:`textinput` is a :class:`~kivy.properties.ObjectProperty`,
    default to None.
    '''

    def on_panel(self, instance, value):
        # Once attached to a panel, open the edit popup on release.
        if value is None:
            return
        self.bind(on_release=self._create_popup)

    def _dismiss(self, *largs):
        # Close and forget the popup, dropping keyboard focus first.
        if self.textinput:
            self.textinput.focus = False
        if self.popup:
            self.popup.dismiss()
        self.popup = None

    def _validate(self, instance):
        # Accept the text currently in the popup as the new value.
        self._dismiss()
        value = self.textinput.text.strip()
        self.value = value

    def _create_popup(self, instance):
        # create popup layout
        content = BoxLayout(orientation='vertical', spacing='5dp')
        self.popup = popup = Popup(title=self.title,
            content=content, size_hint=(None, None), size=('400dp', '250dp'))

        # create the textinput used for string input
        # (fix: the original re-assigned self.textinput twice)
        self.textinput = textinput = TextInput(text=self.value,
            font_size='24sp', multiline=False, size_hint_y=None, height='42sp')
        textinput.bind(on_text_validate=self._validate)

        # construct the content; plain Widgets are used as spacers
        content.add_widget(Widget())
        content.add_widget(textinput)
        content.add_widget(Widget())
        content.add_widget(SettingSpacer())

        # 2 buttons are created to accept or cancel the current value
        btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
        btn = Button(text='Ok')
        btn.bind(on_release=self._validate)
        btnlayout.add_widget(btn)
        btn = Button(text='Cancel')
        btn.bind(on_release=self._dismiss)
        btnlayout.add_widget(btn)
        content.add_widget(btnlayout)

        # all done, open the popup !
        popup.open()
class SettingPath(SettingItem):
    '''Implementation of a Path setting on top of :class:`SettingItem`.
    It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
    clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.filechooser.FileChooserListView` so the user can enter
    a custom value.

    .. versionadded:: 1.1.0
    '''

    popup = ObjectProperty(None, allownone=True)
    '''(internal) Used to store the current popup when it is shown.

    :data:`popup` is a :class:`~kivy.properties.ObjectProperty`, default to
    None.
    '''

    textinput = ObjectProperty(None)
    '''(internal) Used to store the current filechooser from the popup, and
    to listen for changes.

    :data:`textinput` is a :class:`~kivy.properties.ObjectProperty`,
    default to None.
    '''

    def on_panel(self, instance, value):
        # Once attached to a panel, open the chooser popup on release.
        if value is None:
            return
        self.bind(on_release=self._create_popup)

    def _dismiss(self, *largs):
        # Close and forget the popup, dropping keyboard focus first.
        if self.textinput:
            self.textinput.focus = False
        if self.popup:
            self.popup.dismiss()
        self.popup = None

    def _validate(self, instance):
        # Accept the current filechooser selection, if any, resolving
        # symlinks to a canonical absolute path.
        self._dismiss()
        value = self.textinput.selection
        if not value:
            return
        self.value = os.path.realpath(value[0])

    def _create_popup(self, instance):
        # create popup layout
        content = BoxLayout(orientation='vertical', spacing=5)
        self.popup = popup = Popup(title=self.title,
            content=content, size_hint=(None, None), size=(400, 400))

        # create the filechooser
        # (fix: the original re-assigned self.textinput twice)
        self.textinput = textinput = FileChooserListView(
            path=self.value, size_hint=(1, 1), dirselect=True)
        # NOTE(review): validating on every path change also dismisses the
        # popup while browsing -- confirm this is intended.
        textinput.bind(on_path=self._validate)

        # construct the content
        content.add_widget(textinput)
        content.add_widget(SettingSpacer())

        # 2 buttons are created to accept or cancel the current value
        btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
        btn = Button(text='Ok')
        btn.bind(on_release=self._validate)
        btnlayout.add_widget(btn)
        btn = Button(text='Cancel')
        btn.bind(on_release=self._dismiss)
        btnlayout.add_widget(btn)
        content.add_widget(btnlayout)

        # all done, open the popup !
        popup.open()
class SettingNumeric(SettingString):
    '''A numeric setting built on :class:`SettingString`: a clickable
    :class:`~kivy.uix.label.Label` opening a
    :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.Textinput` for entering a custom value.
    '''

    def _validate(self, instance):
        # Infer int vs. float from whether the *current* value contains
        # a decimal point, then convert the entered text accordingly.
        is_float = '.' in str(self.value)
        self._dismiss()
        text = self.textinput.text
        try:
            converted = float(text) if is_float else int(text)
        except ValueError:
            # Unparseable input: keep the previous value untouched.
            return
        self.value = text_type(converted)
class SettingOptions(SettingItem):
    '''Implementation of an option list on top of :class:`SettingItem`.
    It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
    clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    list of options from which the user can select.
    '''

    options = ListProperty([])
    '''List of all availables options. This must be a list of "string" items.
    Otherwise, it will crash. :)

    :data:`options` is a :class:`~kivy.properties.ListProperty`, default to [].
    '''

    popup = ObjectProperty(None, allownone=True)
    '''(internal) Used to store the current popup when it is shown.

    :data:`popup` is a :class:`~kivy.properties.ObjectProperty`, default to
    None.
    '''

    def on_panel(self, instance, value):
        # Once attached to a panel, open the option popup on release.
        if value is None:
            return
        self.bind(on_release=self._create_popup)

    def _set_option(self, instance):
        # Called when an option ToggleButton is released; its text is the
        # chosen option.
        self.value = instance.text
        self.popup.dismiss()

    def _create_popup(self, instance):
        # create the popup
        content = BoxLayout(orientation='vertical', spacing='5dp')
        self.popup = popup = Popup(content=content,
            title=self.title, size_hint=(None, None), size=('400dp', '400dp'))
        # Grow the popup with the number of options.
        popup.height = len(self.options) * dp(55) + dp(150)

        # add all the options; the widget uid is used as the ToggleButton
        # group so only one option can be down at a time.
        content.add_widget(Widget(size_hint_y=None, height=1))
        uid = str(self.uid)
        for option in self.options:
            state = 'down' if option == self.value else 'normal'
            btn = ToggleButton(text=option, state=state, group=uid)
            btn.bind(on_release=self._set_option)
            content.add_widget(btn)

        # finally, add a cancel button to return to the previous panel
        content.add_widget(SettingSpacer())
        btn = Button(text='Cancel', size_hint_y=None, height=dp(50))
        btn.bind(on_release=popup.dismiss)
        content.add_widget(btn)

        # and open the popup !
        popup.open()
class SettingTitle(Label):
    '''A simple title label, used to organize the settings in sections.
    '''

    # Alias the Label text property so a `title` kwarg / attribute maps
    # straight onto the displayed text.
    title = Label.text
class SettingsPanel(GridLayout):
    '''This class is used to construct panel settings, for use with a
    :class:`Settings` instance or subclass.
    '''

    title = StringProperty('Default title')
    '''Title of the panel. The title will be reused by the :class:`Settings` in
    the sidebar.
    '''

    config = ObjectProperty(None, allownone=True)
    '''A :class:`kivy.config.ConfigParser` instance. See module documentation
    for more information.
    '''

    settings = ObjectProperty(None)
    '''A :class:`Settings` instance that will be used to fire the
    `on_config_change` event.
    '''

    def __init__(self, **kwargs):
        # Settings are stacked vertically: one column.
        kwargs.setdefault('cols', 1)
        super(SettingsPanel, self).__init__(**kwargs)

    def on_config(self, instance, value):
        # Reject anything that is not a kivy ConfigParser (the stdlib one
        # has a different API and would break get/set below).
        if value is None:
            return
        if not isinstance(value, ConfigParser):
            # fix: the original adjacent string literals were missing a
            # space ("...use akivy.config...").
            raise Exception('Invalid config object, you must use a '
                            'kivy.config.ConfigParser, not another one !')

    def get_value(self, section, key):
        '''Return the value of the section/key from the :data:`config`
        ConfigParser instance. This function is used by :class:`SettingItem` to
        get the value for a given section/key.

        If you don't want to use a ConfigParser instance, you might want to
        adapt this function.
        '''
        config = self.config
        if not config:
            return
        return config.get(section, key)

    def set_value(self, section, key, value):
        '''Write `value` for section/key into :data:`config` (if set) and
        fire `on_config_change` on the owning :class:`Settings`. No-op if
        the value is unchanged.'''
        current = self.get_value(section, key)
        if current == value:
            return
        config = self.config
        if config:
            config.set(section, key, value)
            config.write()
        settings = self.settings
        if settings:
            settings.dispatch('on_config_change',
                              config, section, key, value)
class InterfaceWithSidebar(BoxLayout):
    '''The default Settings interface class. It displays a sidebar menu
    with names of available settings panels, which may be used to switch
    which one is currently displayed.

    See :meth:`~InterfaceWithSidebar.add_panel` for information on the
    method you must implement if creating your own interface.

    This class also dispatches an event 'on_close', which is triggered
    when the sidebar menu's close button is released. If creating your
    own interface widget, it should also dispatch such an event, which
    will automatically be caught by :class:`Settings` and used to
    trigger its own on_close event.
    '''

    menu = ObjectProperty()
    '''(internal) A reference to the sidebar menu widget.

    :data:`menu` is an :class:`~kivy.properties.ObjectProperty`
    defaulting to None.
    '''

    content = ObjectProperty()
    '''(internal) A reference to the panel display widget (a
    :class:`ContentPanel`).

    :data:`content` is an :class:`~kivy.properties.ObjectProperty`
    defaulting to None.
    '''

    __events__ = ('on_close', )

    def __init__(self, *args, **kwargs):
        super(InterfaceWithSidebar, self).__init__(*args, **kwargs)
        # Re-dispatch the menu's close button release as our own
        # on_close event. Assumes `menu` is populated (e.g. from kv)
        # before __init__ finishes.
        self.menu.close_button.bind(
            on_release=lambda j: self.dispatch('on_close'))

    def add_panel(self, panel, name, uid):
        '''This method is used by Settings to add new panels for possible
        display. Any replacement for ContentPanel *must* implement
        this method.

        :param panel: A :class:`SettingsPanel`. It should be stored,
                      and the interface should provide a way to switch
                      between panels.
        :param name: The name of the panel, as a string. It
                     may be used to represent the panel, but isn't necessarily
                     unique.
        :param uid: A unique int identifying the panel. It should be
                    used to identify and switch between panels.
        '''
        # Register the panel with both the menu (for selection) and the
        # content area (for display).
        self.menu.add_item(name, uid)
        self.content.add_panel(panel, name, uid)

    def on_close(self, *args):
        # Default handler for the on_close event.
        pass
class InterfaceWithSpinner(BoxLayout):
    '''A settings interface that displays a spinner at the top for
    switching between panels.

    The workings of this class are considered internal and are not
    documented. See :meth:`InterfaceWithSidebar` for
    information on implementing your own interface class.
    '''

    __events__ = ('on_close', )

    menu = ObjectProperty()
    '''(internal) A reference to the sidebar menu widget.

    :data:`menu` is an :class:`~kivy.properties.ObjectProperty`
    defaulting to None.
    '''

    content = ObjectProperty()
    '''(internal) A reference to the panel display widget (a
    :class:`ContentPanel`).

    :data:`content` is an :class:`~kivy.properties.ObjectProperty`
    defaulting to None.
    '''

    def __init__(self, *args, **kwargs):
        super(InterfaceWithSpinner, self).__init__(*args, **kwargs)
        # Re-dispatch the menu's close button release as our own
        # on_close event.
        self.menu.close_button.bind(
            on_release=lambda j: self.dispatch('on_close'))

    def add_panel(self, panel, name, uid):
        '''This method is used by Settings to add new panels for possible
        display. Any replacement for ContentPanel *must* implement
        this method.

        :param panel: A :class:`SettingsPanel`. It should be stored,
                      and the interface should provide a way to switch
                      between panels.
        :param name: The name of the panel, as a string. It
                     may be used to represent the panel, but may not
                     be unique.
        :param uid: A unique int identifying the panel. It should be
                    used to identify and switch between panels.
        '''
        self.content.add_panel(panel, name, uid)
        self.menu.add_item(name, uid)

    def on_close(self, *args):
        # Default handler for the on_close event.
        pass
class ContentPanel(ScrollView):
    '''A class for displaying settings panels. It displays a single
    settings panel at a time, taking up the full size and shape of the
    ContentPanel. It is used by :class:`InterfaceWithSidebar` and
    :class:`InterfaceWithSpinner` to display settings.
    '''

    panels = DictProperty({})
    '''(internal) Stores a dictionary relating settings panels to their uids.

    :data:`panels` is a :class:`~kivy.properties.DictProperty`,
    defaulting to {}.
    '''

    container = ObjectProperty()
    '''(internal) A reference to the GridLayout that actually contains the
    settings panel.

    :data:`container` is an :class:`~kivy.properties.ObjectProperty`,
    defaulting to None.
    '''

    current_panel = ObjectProperty(None)
    '''(internal) A reference to the current settings panel.

    :data:`current_panel` is an :class:`~kivy.properties.ObjectProperty`,
    defaulting to None.
    '''

    current_uid = NumericProperty(0)
    '''(internal) A reference to the uid of the current settings panel.

    :data:`current_uid` is a
    :class:`~kivy.properties.NumericProperty`, defaulting to 0.
    '''

    def add_panel(self, panel, name, uid):
        '''This method is used by Settings to add new panels for possible
        display. Any replacement for ContentPanel *must* implement
        this method.

        :param panel: A :class:`SettingsPanel`. It should be stored,
                      and displayed when requested.
        :param name: The name of the panel, as a string. It
                     may be used to represent the panel.
        :param uid: A unique int identifying the panel. It should be
                    stored and used to identify panels when switching.
        '''
        self.panels[uid] = panel
        # The first panel added becomes the displayed one (setting
        # current_uid triggers on_current_uid below).
        if not self.current_uid:
            self.current_uid = uid

    def on_current_uid(self, *args):
        '''The uid of the currently displayed panel. Changing this will
        automatically change the displayed panel.

        :param uid: A panel uid. It should be used to retrieve and
                    display a settings panel that has previously been
                    added with :meth:`add_panel`.
        '''
        uid = self.current_uid
        if uid in self.panels:
            # Swap the old panel widget for the requested one.
            if self.current_panel is not None:
                self.remove_widget(self.current_panel)
            new_panel = self.panels[uid]
            self.add_widget(new_panel)
            self.current_panel = new_panel
            return True
        return False  # New uid doesn't exist

    def add_widget(self, widget):
        # Before `container` exists, widgets go on the ScrollView itself;
        # afterwards they are routed into the container GridLayout.
        if self.container is None:
            super(ContentPanel, self).add_widget(widget)
        else:
            self.container.add_widget(widget)

    def remove_widget(self, widget):
        self.container.remove_widget(widget)
class Settings(BoxLayout):
    '''Settings UI. Check module documentation for more information on how
    to use this class.

    :Events:
        `on_config_change`: ConfigParser instance, section, key, value
            Fired when section/key/value of a ConfigParser changes.
        `on_close`
            Fired by the default panel when the Close button is pressed.
    '''

    interface = ObjectProperty(None)
    '''(internal) Reference to the widget that will contain, organise and
    display the panel configuration panel widgets.

    :data:`interface` is a :class:`~kivy.properties.ObjectProperty`, default to
    None.
    '''

    interface_cls = ObjectProperty(InterfaceWithSidebar)
    '''The widget class that will be used to display the graphical
    interface for the settings panel. By default, it displays one settings
    panel at a time with a sidebar to switch between them.

    :data:`interface_cls` is a
    :class:`~kivy.properties.ObjectProperty`, default to
    :class:`InterfaceWithSidebar`.
    '''

    __events__ = ('on_close', 'on_config_change')

    def __init__(self, *args):
        self._types = {}
        super(Settings, self).__init__(*args)
        self.add_interface()
        # Register the built-in setting widget types usable from JSON.
        self.register_type('string', SettingString)
        self.register_type('bool', SettingBoolean)
        self.register_type('numeric', SettingNumeric)
        self.register_type('options', SettingOptions)
        self.register_type('title', SettingTitle)
        self.register_type('path', SettingPath)

    def on_touch_down(self, touch):
        # Swallow any touch inside the settings UI so it does not leak
        # through to widgets underneath.
        if self.collide_point(*touch.pos):
            super(Settings, self).on_touch_down(touch)
            return True

    def register_type(self, tp, cls):
        '''Register a new type that can be used in the JSON definition.
        '''
        self._types[tp] = cls

    def on_close(self, *args):
        # Default handler for the on_close event.
        pass

    def add_interface(self):
        '''(Internal) creates an instance of :attr:`Settings.interface_cls`,
        and sets it to :attr:`~Settings.interface`. When json panels are
        created, they will be added to this interface, which will display them
        to the user.
        '''
        interface = self.interface_cls()
        self.interface = interface
        self.add_widget(interface)
        # Propagate the interface's close event as our own.
        self.interface.bind(on_close=lambda j: self.dispatch('on_close'))

    def on_config_change(self, config, section, key, value):
        # Default handler for the on_config_change event.
        pass

    def add_json_panel(self, title, config, filename=None, data=None):
        '''Create and add a new :class:`SettingsPanel` using the configuration
        `config`, with the JSON definition `filename`.

        Check the :ref:`settings_json` section in the documentation for more
        information about JSON format, and the usage of this function.
        '''
        panel = self.create_json_panel(title, config, filename, data)
        uid = panel.uid
        if self.interface is not None:
            self.interface.add_panel(panel, title, uid)

    def create_json_panel(self, title, config, filename=None, data=None):
        '''Create new :class:`SettingsPanel`.

        .. versionadded:: 1.5.0

        Check the documentation of :meth:`add_json_panel` for more information.

        :raises Exception: if neither `filename` nor `data` is given.
        :raises ValueError: if the JSON is not a list, an entry lacks a
            "type" key, or a type has no registered class.
        '''
        if filename is None and data is None:
            raise Exception('You must specify either the filename or data')
        if filename is not None:
            with open(filename, 'r') as fd:
                data = json.load(fd)
        else:
            data = json.loads(data)
        if not isinstance(data, list):
            raise ValueError('The first element must be a list')
        panel = SettingsPanel(title=title, settings=self, config=config)

        for setting in data:
            # determine the type and the class to use
            if 'type' not in setting:
                raise ValueError('One setting is missing the "type" element')
            ttype = setting['type']
            cls = self._types.get(ttype)
            if cls is None:
                raise ValueError(
                    'No class registered to handle the <%s> type' % ttype)

            # create an instance of the class, without the type attribute;
            # keys are coerced to str so they can be passed as **kwargs.
            del setting['type']
            str_settings = {}
            for key, item in setting.items():
                str_settings[str(key)] = item
            instance = cls(panel=panel, **str_settings)

            # instance created, add to the panel
            panel.add_widget(instance)

        return panel

    def add_kivy_panel(self):
        '''Add a panel for configuring Kivy. This panel acts directly on the
        kivy configuration. Feel free to include or exclude it in your
        configuration.

        See :meth:`~kivy.app.App.use_kivy_settings` for information on
        enabling/disabling the automatic kivy panel.
        '''
        from kivy import kivy_data_dir
        from kivy.config import Config
        from os.path import join
        self.add_json_panel('Kivy', Config,
                            join(kivy_data_dir, 'settings_kivy.json'))
class SettingsWithSidebar(Settings):
    '''A settings widget showing panels with a sidebar for switching
    between them. Since that is already the default behaviour of
    :class:`Settings`, this subclass is a trivial wrapper.
    '''
class SettingsWithSpinner(Settings):
    '''A settings widget that shows one settings panel at a time, with a
    spinner at the top to switch between them.
    '''

    def __init__(self, *args, **kwargs):
        # Swap the interface class in before Settings builds it.
        self.interface_cls = InterfaceWithSpinner
        super(SettingsWithSpinner, self).__init__(*args, **kwargs)
class SettingsWithTabbedPanel(Settings):
    '''A settings widget presenting each settings panel as a page of a
    :class:`~kivy.uix.tabbedpanel.TabbedPanel`.
    '''

    __events__ = ('on_close', )

    def __init__(self, *args, **kwargs):
        # Use the tabbed interface instead of the default sidebar one.
        self.interface_cls = InterfaceWithTabbedPanel
        super(SettingsWithTabbedPanel, self).__init__(*args, **kwargs)

    def on_close(self, *args):
        # Default handler for the on_close event.
        pass
class SettingsWithNoMenu(Settings):
    '''A settings widget displaying a single settings panel and *no*
    Close button. It refuses more than one settings panel and is meant
    for programs with so few settings that a panel switcher is not
    useful.

    .. warning::

        This Settings panel does *not* provide a Close
        button, and so it is impossible to leave the settings screen
        unless you also add other behaviour or override
        :meth:`~kivy.app.App.display_settings` and
        :meth:`~kivy.app.App.close_settings`.
    '''

    def __init__(self, *args, **kwargs):
        # Use the single-panel, menu-less interface.
        self.interface_cls = InterfaceWithNoMenu
        super(SettingsWithNoMenu, self).__init__(*args, **kwargs)
class InterfaceWithNoMenu(ContentPanel):
    '''The interface widget used by :class:`SettingsWithNoMenu`. It
    stores and displays a single settings panel.

    This widget is considered internal and is not documented. See
    :class:`ContentPanel` for information on defining your own content
    widget.
    '''

    def add_widget(self, widget):
        # This interface can show exactly one panel; reject any extra.
        if self.container is not None and len(self.container.children) > 0:
            # fix: the original adjacent string literals were missing a
            # space ("...one settingspanel").
            raise Exception('ContentNoMenu cannot accept more than one '
                            'settings panel')
        super(InterfaceWithNoMenu, self).add_widget(widget)
class InterfaceWithTabbedPanel(FloatLayout):
    '''The content widget used by :class:`SettingsWithTabbedPanel`. It
    stores and displays settings panels in tabs of a TabbedPanel.

    This widget is considered internal and is not documented. See
    :class:`InterfaceWithSidebar` for information on defining your own
    interface widget.
    '''

    # (internal) Reference to the TabbedPanel holding one tab per panel.
    tabbedpanel = ObjectProperty()
    # (internal) Reference to the Close button widget.
    close_button = ObjectProperty()

    __events__ = ('on_close', )

    def __init__(self, *args, **kwargs):
        super(InterfaceWithTabbedPanel, self).__init__(*args, **kwargs)
        # Re-dispatch the close button release as our own on_close event.
        self.close_button.bind(on_release=lambda j: self.dispatch('on_close'))

    def add_panel(self, panel, name, uid):
        # Each panel gets its own scrollable tab.
        scrollview = ScrollView()
        scrollview.add_widget(panel)
        panelitem = TabbedPanelHeader(text=name, content=scrollview)
        self.tabbedpanel.add_widget(panelitem)

    def on_close(self, *args):
        # Default handler for the on_close event.
        pass
class MenuSpinner(BoxLayout):
    '''The menu class used by :class:`SettingsWithSpinner`. It provides a
    spinner with an entry for each settings panel.

    This widget is considered internal and is not documented. See
    :class:`MenuSidebar` for information on menus and creating your own menu
    class.
    '''

    # uid of the currently selected panel.
    selected_uid = NumericProperty(0)
    # Reference to the spinner widget used for switching panels.
    spinner = ObjectProperty()
    # Maps the (possibly de-duplicated) display name to the panel uid.
    panel_names = DictProperty({})
    # Mirrors the spinner's current text (see on_spinner_text).
    spinner_text = StringProperty()
    # Reference to the Close button widget.
    # fix: the original defined close_button twice (ObjectProperty(0),
    # then ObjectProperty()); only the last definition took effect.
    close_button = ObjectProperty()

    def add_item(self, name, uid):
        '''Add a panel entry; duplicate display names get a numeric
        suffix (" 2", " 3", ...) so spinner entries stay unique.'''
        values = self.spinner.values
        if name in values:
            i = 2
            while name + ' {}'.format(i) in values:
                i += 1
            name = name + ' {}'.format(i)
        self.panel_names[name] = uid
        self.spinner.values.append(name)
        if not self.spinner.text:
            self.spinner.text = name

    def on_spinner_text(self, *args):
        # Selecting a spinner entry selects the matching panel uid.
        text = self.spinner_text
        self.selected_uid = self.panel_names[text]
class MenuSidebar(FloatLayout):
    '''The menu used by :class:`InterfaceWithSidebar`. It provides a
    sidebar with an entry for each settings panel, which the user may
    click to select.
    '''

    selected_uid = NumericProperty(0)
    '''The uid of the currently selected panel. This may be used to switch
    between displayed panels, e.g. by binding it to the
    :data:`~ContentPanel.current_uid` of a :class:`ContentPanel`.

    :data:`selected_uid` is a
    :class:`~kivy.properties.NumericProperty`, default to 0.
    '''

    buttons_layout = ObjectProperty(None)
    '''(internal) Reference to the GridLayout that contains individual
    settings panel menu buttons.

    :data:`buttons_layout` is an
    :class:`~kivy.properties.ObjectProperty`, default to None.
    '''

    close_button = ObjectProperty(None)
    '''(internal) Reference to the widget's Close button.

    :data:`close_button` is an
    :class:`~kivy.properties.ObjectProperty`, default to None.
    '''

    def add_item(self, name, uid):
        '''This method is used to add new panels to the menu.

        :param name: The name (a string) of the panel. It should be
                     used to represent the panel in the menu.
        :param uid: The name (an int) of the panel. It should be used
                    internally to represent the panel, and used to set
                    self.selected_uid when the panel is changed.
        '''
        label = SettingSidebarLabel(text=name, uid=uid, menu=self)
        # fix: guard buttons_layout *before* touching its children; the
        # original dereferenced it first and crashed when it was None.
        if self.buttons_layout is not None:
            # The first entry added starts out selected.
            if len(self.buttons_layout.children) == 0:
                label.selected = True
            self.buttons_layout.add_widget(label)

    def on_selected_uid(self, *args):
        '''(internal) unselects any currently selected menu buttons, unless
        they represent the current panel.
        '''
        for button in self.buttons_layout.children:
            if button.uid != self.selected_uid:
                button.selected = False
class SettingSidebarLabel(Label):
    # Internal class, not documented.
    # A clickable sidebar entry; touching it marks it selected and
    # propagates its uid to the owning menu.
    selected = BooleanProperty(False)
    uid = NumericProperty(0)
    menu = ObjectProperty(None)

    def on_touch_down(self, touch):
        # Only react to touches inside our own bounding box.
        if self.collide_point(*touch.pos):
            self.selected = True
            self.menu.selected_uid = self.uid
if __name__ == '__main__':
    from kivy.app import App

    class SettingsApp(App):
        '''Minimal demo application showing the Kivy settings panel.'''

        def build(self):
            root = Settings()
            root.add_kivy_panel()
            root.bind(on_close=self.stop)
            return root

    SettingsApp().run()
| |
######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# DO THE FLUX DENSITY BOOTSTRAPPING
#
# NOTE(review): CASA (Python 2) pipeline segment. CASA tasks are driven
# through module globals (default(...) then bare task() calls read them),
# and many names come from earlier pipeline stages (msname, ms_active,
# maincasalog, center_frequencies, find_EVLA_band, logprint, runtiming,
# flux_field_select_string, ...) -- confirm against the surrounding
# pipeline scripts. `async=False` also makes this Python-2-only.
logprint ("Starting EVLA_pipe_fluxboot.py", logfileout='logs/fluxboot.log')
time_list=runtiming('fluxboot', 'start')
QA2_fluxboot='Pass'

# fluxscale's per-spw results are captured by redirecting the casalog to
# this file, which is parsed below.
fluxscale_output=msname.rstrip('ms')+'fluxdensities'
fluxcalfields=flux_field_select_string

logprint ("Doing flux density bootstrapping", logfileout='logs/fluxboot.log')
logprint ("Flux densities will be written to "+fluxscale_output, logfileout='logs/fluxboot.log')

syscommand='rm -rf '+fluxscale_output
os.system(syscommand)

#If this is needed earlier in the script move to msinfo.py
#currentcasalog = casalogger.func_globals['thelogfile']
casalog.setlogfile(fluxscale_output)

# Run fluxscale on the gain table, referencing the flux calibrator(s).
default('fluxscale')
vis='calibrators.ms'
caltable='fluxgaincal.g'
fluxtable='fluxgaincalFcal.g'
reference=[fluxcalfields]
transfer=['']
append=False
refspwmap=[-1]
async=False
fluxscale()

# Restore the main casalog before parsing the captured output.
casalog.setlogfile(maincasalog)

logprint ("Fitting data with power law", logfileout='logs/fluxboot.log')

#
# the variable center_frequencies should already have been filled out
# with the reference frequencies of the spectral window table
#

# Power-law fit in log space: log10(S) = a + b*log10(f).
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err

try:
    ff = open(fluxscale_output, 'r')
except IOError as err:
    logprint (fluxscale_output+" doesn't exist, error: "+err.filename, logfileout='logs/fluxboot.log')

# looking for lines like:
#2012-03-09 21:30:23 INFO fluxscale:::: Flux density for J1717-3342 in SpW=3 is: 1.94158 +/- 0.0123058 (SNR = 157.777, N= 34)
# sometimes they look like:
#2012-03-09 21:30:23 INFO fluxscale:::: Flux density for J1717-3342 in SpW=0 is: INSUFFICIENT DATA
# so watch for that.

sources = []
flux_densities = []
spws = []
for line in ff:
    if 'Flux density for' in line:
        fields = line[:-1].split()
        if (fields[11] != 'INSUFFICIENT'):
            # fields[7]=source, fields[9]='SpW=N', fields[11]=flux,
            # fields[13]=uncertainty (positions fixed by the log format).
            sources.append(fields[7])
            flux_densities.append([float(fields[11]), float(fields[13])])
            spws.append(int(fields[9].split('=')[1]))

ii = 0
unique_sources = list(np.unique(sources))
results = []
for source in unique_sources:
    # Indices of all measurements belonging to this source.
    indices = []
    for ii in range(len(sources)):
        if (sources[ii] == source):
            indices.append(ii)
    bands = []
    for ii in range(len(indices)):
        bands.append(find_EVLA_band(center_frequencies[spws[indices[ii]]]))
    unique_bands = list(np.unique(bands))
    for band in unique_bands:
        # Collect log-frequency, log-flux and propagated log-errors for
        # the spws of this source that fall in this band.
        lfreqs = []
        lfds = []
        lerrs = []
        uspws = []
        for ii in range(len(indices)):
            if find_EVLA_band(center_frequencies[spws[indices[ii]]]) == band:
                lfreqs.append(log10(center_frequencies[spws[indices[ii]]]))
                lfds.append(log10(flux_densities[indices[ii]][0]))
                lerrs.append(log10(e) * flux_densities[indices[ii]][1]/flux_densities[indices[ii]][0])
                uspws.append(spws[indices[ii]])
        # if we didn't care about the errors on the data or the fit coefficients, just:
        #       coefficients = np.polyfit(lfreqs, lfds, 1)
        # or, if we ever get to numpy 1.7.x, for weighted fit, and returning
        # covariance matrix, do:
        #       ...
        #       weights = []
        #       weight_sum = 0.0
        #       for ii in range(len(lfreqs)):
        #           weights.append(1.0 / (lerrs[ii]*lerrs[ii]))
        #           weight_sum += weights[ii]
        #       for ii in range(len(weights)):
        #           weights[ii] /= weight_sum
        #       coefficients = np.polyfit(lfreqs, lfds, 1, w=weights, cov=True)
        # but, for now, use the full scipy.optimize.leastsq route...
        #
        # actually, after a lot of testing, np.polyfit does not return a global
        # minimum solution.  sticking with leastsq (modified as below to get the
        # proper errors), or once we get a modern enough version of scipy, moving
        # to curve_fit, is better.
        #
        if len(lfds) < 2:
            # Single measurement: flat spectrum, no fit possible.
            aa = lfds[0]
            bb = 0.0
            SNR = 0.0
        else:
            alfds = scp.array(lfds)
            alerrs = scp.array(lerrs)
            alfreqs = scp.array(lfreqs)
            pinit = [0.0, 0.0]
            fit_out = scpo.leastsq(errfunc, pinit, args=(alfreqs, alfds, alerrs), full_output=1)
            pfinal = fit_out[0]
            covar = fit_out[1]
            aa = pfinal[0]
            bb = pfinal[1]
            #
            # the fit is of the form:
            #     log(S) = a + b * log(f)
            # with a = pfinal[0] and b = pfinal[1].  the errors on the coefficients are
            # sqrt(covar[i][i]*residual_variance) with the residual covariance calculated
            # as below (it's like the reduced chi squared without dividing out the errors).
            # see the scipy.optimize.leastsq documentation and
            # http://stackoverflow.com/questions/14854339/in-scipy-how-and-why-does-curve-fit-calculate-the-covariance-of-the-parameter-es
            #
            summed_error = 0.0
            for ii in range(len(alfds)):
                model = aa + bb*alfreqs[ii]
                residual = (model - alfds[ii]) * (model - alfds[ii])
                summed_error += residual
            residual_variance = summed_error / (len(alfds) - 2)
            SNR = fabs(bb) / sqrt(covar[1][1] * residual_variance)
        #
        # take as the reference frequency the lowest one.  (this shouldn't matter,
        # in principle).
        #
        reffreq = 10.0**lfreqs[0]/1.0e9
        fluxdensity = 10.0**(aa + bb*lfreqs[0])
        spix = bb
        results.append([ source, uspws, fluxdensity, spix, SNR, reffreq ])
        logprint(source + ' ' + band + ' fitted spectral index & SNR = ' + str(spix) + ' ' + str(SNR), logfileout='logs/fluxboot.log')
        logprint("Frequency, data, error, and fitted data:", logfileout='logs/fluxboot.log')
        for ii in range(len(lfreqs)):
            SS = fluxdensity * (10.0**lfreqs[ii]/reffreq/1.0e9)**spix
            fderr = lerrs[ii]*(10**lfds[ii])/log10(e)
            logprint('    '+str(10.0**lfreqs[ii]/1.0e9)+'  '+ str(10.0**lfds[ii])+'  '+str(fderr)+'  '+str(SS), logfileout='logs/fluxboot.log')

logprint ("Setting power-law fit in the model column", logfileout='logs/fluxboot.log')

# Write each fitted power law into the MODEL column with setjy, one spw
# at a time, for both calibrators.ms and the main measurement set.
for result in results:
    for spw_i in result[1]:
        #
        # here, check on SNR, but don't do this yet, until we know what typical SNRs are
        #
        #    if result[4] > SNRlimit:
        logprint('Running setjy on spw '+str(spw_i), logfileout='logs/fluxboot.log')
        default('setjy')
        vis='calibrators.ms'
        field = str(result[0])
        #spw = ','.join(["%s" % ii for ii in result[1]])
        spw = str(spw_i)
        selectdata=False
        modimage=''
        listmodimages=False
        scalebychan=True
        fluxdensity = [ result[2], 0, 0, 0 ]
        spix = result[3]
        reffreq = str(result[5])+'GHz'
        standard='Perley-Butler 2010'
        usescratch=False
        async=False
        setjy()
        # Repeat on the main measurement set with the same globals.
        vis=ms_active
        setjy()
        # Implausibly steep spectral index flags the QA2 score.
        if (abs(spix) > 5.0):
            QA2_fluxboot='Fail'

logprint ("Flux density bootstrapping finished", logfileout='logs/fluxboot.log')

logprint ("Plotting model calibrator flux densities", logfileout='logs/fluxboot.log')

syscommand='rm -rf bootstrappedFluxDensities.png'
os.system(syscommand)

clearstat()

# Plot the model amplitudes vs. frequency for the calibrator scans.
default('plotms')
vis=ms_active
xaxis='freq'
yaxis='amp'
ydatacolumn='model'
selectdata=True
scan=calibrator_scan_select_string
correlation=corrstring
averagedata=True
avgtime='1e8s'
avgscan=True
transform=False
extendflag=False
iteraxis=''
coloraxis='field'
plotrange=[]
title=''
xlabel=''
ylabel=''
showmajorgrid=False
showminorgrid=False
plotfile='bootstrappedFluxDensities.png'
overwrite=True
async=False
plotms()

logprint ("QA2 score: "+QA2_fluxboot, logfileout='logs/fluxboot.log')
logprint ("Finished EVLA_pipe_fluxboot.py", logfileout='logs/fluxboot.log')
time_list=runtiming('fluxboot', 'end')

pipeline_save()
| |
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"
from proboscis import test
from selenium import webdriver
from pages import InfraHostsPage
from pages import WelcomePage
from pages import InfraPage
from pages import AmazonEc2Page
from pages import DigitalOceanPage
from pages import HostRegistrationPage
import time
import traceback
from datetime import datetime
import random
import types
import unittest
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import after_class
from proboscis import before_class
from proboscis import SkipTest
from proboscis import test
import constants
# @test(groups=["DigitalOcean"])
# class DigitalTestHostAddDelete (object):
#
# @test
# def __init__(self):
# self.driver = webdriver.Firefox()
# self.driver.implicitly_wait(10)
# self.digital_handle = DigitalOceanPage.DigitalOcean(self.driver)
# self.infra_hosts_handle = InfraHostsPage.InfraHostsPage(self.driver)
# self.infra_page = InfraPage.InfraPage(self.driver)
# self.welcome_page = WelcomePage.WelcomePage(self.driver)
#
# @test
# def test_add_digital_host(self):
# try:
# print "Inside test_add_digital_ocean"
# self.driver.get(constants.host_add_DigitalOcean_url)
# self.digital_handle.select_quantity()
# self.digital_handle.click_slide_bar()
# self.digital_handle.input_host_name("DHost01")
# self.digital_handle.input_host_desc("DigitalOcean Hosts")
# self.digital_handle.input_access_token("a1a7c625c612c3d9a07300f1396fff2ff129342c63d4516b3101fdb61b1f2a48")
# self.digital_handle.select_image("ubuntu-14-04-x64")
# self.digital_handle.select_host_mem_size("16gb")
# self.digital_handle.select_region("San Francisco 1")
# self.digital_handle.click_create_btn()
# time.sleep(10)
# print "DigitalOcean Hosts add complete"
# except Exception as ex:
# traceback.print_exc()
#
# @test(depends_on=[test_add_digital_host])
# def test_del_host(self):
# self.driver.get(constants.RancherServerBaseURL + constants.welcome_url)
# self.driver.get(constants.inventory_url)
# time.sleep(2)
# print "Going to delete hosts"
# self.infra_hosts_handle.host_delete()
# print "Hosts delete complete"
#
# @test(depends_on=[test_del_host], always_run=True)
# def shutdown(self):
# print "Inside tearDown"
# self.driver.close()
@test(groups=["EC2"])
class EC2TestHostAddDelete (object):
@test
def init(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(10)
self.driver.get(constants.welcome_url)
self.welcome_page = WelcomePage.WelcomePage(self.driver)
@test(groups=["EC2"], depends_on=[init])
def test_add_ec2_host(self):
assert self.welcome_page.is_title_matches(), "Rancher"
self.welcome_page.click_infra_tab()
time.sleep(1)
self.infra_handle = InfraPage.InfraPage(self.driver)
self.infra_handle.click_add_host()
try:
self.host_registration_handle = HostRegistrationPage.HostRegistration(self.driver)
access_control_txt = self.host_registration_handle.get_access_control_txt()
host_reg_txt = self.host_registration_handle.get_host_registration_txt()
if "Access Control" in access_control_txt and "Host Registration" in host_reg_txt:
self.host_registration_handle.click_save_btn()
time.sleep(1)
except Exception as ex:
print "Host Registration page not found"
self.infra_handle = InfraPage.InfraPage(self.driver)
cur_text = self.infra_handle.get_add_host_hdr()
assert cur_text, "Add Host"
self.infra_hosts_handle = InfraHostsPage.InfraHostsPage(self.driver)
try:
self.infra_hosts_handle.click_ec2_img()
time.sleep(2)
except Exception as ex:
print "Not clicked"
self.ec2_handle = AmazonEc2Page.AmazonEc2(self.driver)
self.ec2_handle.input_access_key("AKIAI54MYS73NAP2ABHA")
self.ec2_handle.input_secret_key("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")
self.ec2_handle.click_next_btn()
time.sleep(2)
cur_text = self.ec2_handle.get_availability_zone_list()
print cur_text
assert cur_text, "Availability zone & vpc".capitalize()
self.ec2_handle.select_zone("us-west-2a")
self.ec2_handle.click_vpc_radio_btn()
self.ec2_handle.click_set_instance_option_btn()
self.ec2_handle.click_next_btn()
time.sleep(5)
self.ec2_handle.click_slide_bar_3()
self.ec2_handle.input_host_name("EHost01")
self.ec2_handle.input_host_desc("Test Hosts")
self.ec2_handle.input_host_mem_size("16")
self.ec2_handle.select_host_instance_type("t2.small")
self.ec2_handle.click_create_btn()
time.sleep(10)
self.infra_hosts_handle.check_creating_host(1)
time.sleep(2)
#self.infra_hosts_handle.wait_for_first_host_active("BOOTSTRAPPING")
self.infra_hosts_handle.wait_for_first_host_active("ACTIVE")
print "EC2 Hosts add complete"
self.driver.close()
def shutdown(self):
print "Inside tearDown"
self.driver.close()
def test_del_host(self):
self.driver.get(constants.RancherServerBaseURL + constants.welcome_url)
self.driver.get(constants.inventory_url)
time.sleep(2)
print "Going to delete hosts"
self.infra_hosts_handle.host_delete()
print "Hosts delete complete"
# @test(groups=["Packet"])
# class PacketTestHostAddDelete (object):
#
# @test
# def __init__(self):
# self.driver = webdriver.Firefox()
# self.driver.implicitly_wait(10)
# self.digital_handle = DigitalOceanPage.DigitalOcean(self.driver)
# self.infra_hosts_handle = InfraHostsPage.InfraHostsPage(self.driver)
# self.infra_page = InfraPage.InfraPage(self.driver)
# self.welcome_page = WelcomePage.WelcomePage(self.driver)
#
# @test
# def test_add_digital_host(self):
# try:
# print "Inside test_add_digital_ocean"
# self.driver.get(constants.host_add_Packet_url)
# self.digital_handle.select_quantity()
# self.digital_handle.click_slide_bar()
# self.digital_handle.input_host_name("DHost01")
# self.digital_handle.input_host_desc("DigitalOcean Hosts")
# self.digital_handle.input_access_token("a1a7c625c612c3d9a07300f1396fff2ff129342c63d4516b3101fdb61b1f2a48")
# self.digital_handle.select_image("ubuntu-14-04-x64")
# self.digital_handle.select_host_mem_size("16gb")
# self.digital_handle.select_region("San Francisco 1")
# self.digital_handle.click_create_btn()
# time.sleep(10)
# print "DigitalOcean Hosts add complete"
# except Exception as ex:
# traceback.print_exc()
#
# @test(depends_on=[test_add_digital_host])
# def test_del_host(self):
# self.driver.get(constants.RancherServerBaseURL + constants.welcome_url)
# self.driver.get(constants.inventory_url)
# time.sleep(2)
# print "Going to delete hosts"
# self.infra_hosts_handle.host_delete()
# print "Hosts delete complete"
#
# @test(depends_on=[test_del_host], always_run=True)
# def shutdown(self):
# print "Inside tearDown"
# self.driver.close()
#
# @test(groups=["Rackspace"])
# class RackspaceTestHostAddDelete (object):
#
# @test
# def __init__(self):
# self.driver = webdriver.Firefox()
# self.driver.implicitly_wait(10)
# self.digital_handle = DigitalOceanPage.DigitalOcean(self.driver)
# self.infra_hosts_handle = InfraHostsPage.InfraHostsPage(self.driver)
# self.infra_page = InfraPage.InfraPage(self.driver)
# self.welcome_page = WelcomePage.WelcomePage(self.driver)
#
# @test
# def test_add_digital_host(self):
# try:
# print "Inside test_add_digital_ocean"
# self.driver.get(constants.host_add_Rackspace_url)
# self.digital_handle.select_quantity()
# self.digital_handle.click_slide_bar()
# self.digital_handle.input_host_name("DHost01")
# self.digital_handle.input_host_desc("DigitalOcean Hosts")
# self.digital_handle.input_access_token("a1a7c625c612c3d9a07300f1396fff2ff129342c63d4516b3101fdb61b1f2a48")
# self.digital_handle.select_image("ubuntu-14-04-x64")
# self.digital_handle.select_host_mem_size("16gb")
# self.digital_handle.select_region("San Francisco 1")
# self.digital_handle.click_create_btn()
# time.sleep(10)
# print "DigitalOcean Hosts add complete"
# except Exception as ex:
# traceback.print_exc()
#
# @test(depends_on=[test_add_digital_host])
# def test_del_host(self):
# self.driver.get(constants.RancherServerBaseURL + constants.welcome_url)
# self.driver.get(constants.inventory_url)
# time.sleep(2)
# print "Going to delete hosts"
# self.infra_hosts_handle.host_delete()
# print "Hosts delete complete"
#
# @test(depends_on=[test_del_host], always_run=True)
# def shutdown(self):
# print "Inside tearDown"
# self.driver.close()
def test_host_add_delete():
    """Entry point: run every proboscis-registered test and exit with the
    runner's status code."""
    from proboscis import TestProgram

    # Proboscis resolves @test dependency ordering and calls sys.exit().
    TestProgram().run_and_exit()


if __name__ == '__main__':
    test_host_add_delete()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for Google BigQuery service.
"""
import os
from datetime import datetime
from airflow import models
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCheckOperator,
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateEmptyTableOperator,
BigQueryDeleteDatasetOperator,
BigQueryGetDataOperator,
BigQueryInsertJobOperator,
BigQueryIntervalCheckOperator,
BigQueryValueCheckOperator,
)
from airflow.utils.dates import days_ago
# Project and dataset are configurable through the environment so the example
# can run against any GCP project.
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
DATASET_NAME = os.environ.get("GCP_BIGQUERY_DATASET_NAME", "test_dataset")
LOCATION = "southamerica-east1"
TABLE_1 = "table1"
TABLE_2 = "table2"
# Shared schema used for both example tables.
SCHEMA = [
    {"name": "value", "type": "INTEGER", "mode": "REQUIRED"},
    {"name": "name", "type": "STRING", "mode": "NULLABLE"},
    {"name": "ds", "type": "DATE", "mode": "NULLABLE"},
]
# Build the example DAG twice: once with BigQuery's default location and once
# pinned to LOCATION. Dataset names get a numeric suffix so the two DAGs
# operate on distinct datasets.
locations = [None, LOCATION]

for index, location in enumerate(locations, 1):
    dag_id = "example_bigquery_queries_location" if location else "example_bigquery_queries"
    DATASET = DATASET_NAME + str(index)
    INSERT_DATE = datetime.now().strftime("%Y-%m-%d")

    # [START howto_operator_bigquery_query]
    INSERT_ROWS_QUERY = (
        f"INSERT {DATASET}.{TABLE_1} VALUES "
        f"(42, 'monthy python', '{INSERT_DATE}'), "
        f"(42, 'fishy fish', '{INSERT_DATE}');"
    )
    # [END howto_operator_bigquery_query]

    with models.DAG(
        dag_id,
        schedule_interval='@once',  # Override to match your needs
        start_date=days_ago(1),
        tags=["example"],
        user_defined_macros={"DATASET": DATASET, "TABLE": TABLE_1},
    ) as dag_with_locations:
        create_dataset = BigQueryCreateEmptyDatasetOperator(
            task_id="create-dataset",
            dataset_id=DATASET,
            location=location,
        )

        create_table_1 = BigQueryCreateEmptyTableOperator(
            task_id="create_table_1",
            dataset_id=DATASET,
            table_id=TABLE_1,
            schema_fields=SCHEMA,
            location=location,
        )

        create_table_2 = BigQueryCreateEmptyTableOperator(
            task_id="create_table_2",
            dataset_id=DATASET,
            table_id=TABLE_2,
            schema_fields=SCHEMA,
            location=location,
        )

        create_dataset >> [create_table_1, create_table_2]

        delete_dataset = BigQueryDeleteDatasetOperator(
            task_id="delete_dataset", dataset_id=DATASET, delete_contents=True
        )

        # [START howto_operator_bigquery_insert_job]
        insert_query_job = BigQueryInsertJobOperator(
            task_id="insert_query_job",
            configuration={
                "query": {
                    "query": INSERT_ROWS_QUERY,
                    "useLegacySql": False,
                }
            },
            location=location,
        )
        # [END howto_operator_bigquery_insert_job]

        # [START howto_operator_bigquery_select_job]
        select_query_job = BigQueryInsertJobOperator(
            task_id="select_query_job",
            configuration={
                "query": {
                    "query": "{% include 'example_bigquery_query.sql' %}",
                    "useLegacySql": False,
                }
            },
            location=location,
        )
        # [END howto_operator_bigquery_select_job]

        execute_insert_query = BigQueryInsertJobOperator(
            task_id="execute_insert_query",
            configuration={
                "query": {
                    "query": INSERT_ROWS_QUERY,
                    "useLegacySql": False,
                }
            },
            location=location,
        )

        # A single job may run several statements in sequence.
        bigquery_execute_multi_query = BigQueryInsertJobOperator(
            task_id="execute_multi_query",
            configuration={
                "query": {
                    "query": [
                        f"SELECT * FROM {DATASET}.{TABLE_2}",
                        f"SELECT COUNT(*) FROM {DATASET}.{TABLE_2}",
                    ],
                    "useLegacySql": False,
                }
            },
            location=location,
        )

        # Query TABLE_1 and write the result set into TABLE_2.
        execute_query_save = BigQueryInsertJobOperator(
            task_id="execute_query_save",
            configuration={
                "query": {
                    "query": f"SELECT * FROM {DATASET}.{TABLE_1}",
                    "useLegacySql": False,
                    "destinationTable": {
                        'projectId': PROJECT_ID,
                        'datasetId': DATASET,
                        'tableId': TABLE_2,
                    },
                }
            },
            location=location,
        )

        # [START howto_operator_bigquery_get_data]
        get_data = BigQueryGetDataOperator(
            task_id="get_data",
            dataset_id=DATASET,
            table_id=TABLE_1,
            max_results=10,
            selected_fields="value,name",
            location=location,
        )
        # [END howto_operator_bigquery_get_data]

        get_data_result = BashOperator(
            task_id="get_data_result",
            bash_command=f"echo {get_data.output}",
        )

        # [START howto_operator_bigquery_check]
        check_count = BigQueryCheckOperator(
            task_id="check_count",
            sql=f"SELECT COUNT(*) FROM {DATASET}.{TABLE_1}",
            use_legacy_sql=False,
            location=location,
        )
        # [END howto_operator_bigquery_check]

        # [START howto_operator_bigquery_value_check]
        check_value = BigQueryValueCheckOperator(
            task_id="check_value",
            sql=f"SELECT COUNT(*) FROM {DATASET}.{TABLE_1}",
            pass_value=4,
            use_legacy_sql=False,
            location=location,
        )
        # [END howto_operator_bigquery_value_check]

        # [START howto_operator_bigquery_interval_check]
        check_interval = BigQueryIntervalCheckOperator(
            task_id="check_interval",
            table=f"{DATASET}.{TABLE_1}",
            days_back=1,
            metrics_thresholds={"COUNT(*)": 1.5},
            use_legacy_sql=False,
            location=location,
        )
        # [END howto_operator_bigquery_interval_check]

        # Task graph wiring.
        [create_table_1, create_table_2] >> insert_query_job >> select_query_job
        insert_query_job >> execute_insert_query
        execute_insert_query >> get_data >> get_data_result >> delete_dataset
        execute_insert_query >> execute_query_save >> bigquery_execute_multi_query >> delete_dataset
        execute_insert_query >> [check_count, check_value, check_interval] >> delete_dataset

    # Expose the DAG at module level so Airflow's DAG discovery finds it.
    globals()[dag_id] = dag_with_locations
| |
# This file is part of beets.
# Copyright 2012, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database.
"""
import logging
import musicbrainzngs
import traceback
import beets.autotag.hooks
import beets
from beets import util
# Maximum number of results to request from each MusicBrainz search.
SEARCH_LIMIT = 5
# MBID of the special "Various Artists" placeholder artist.
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'

# Identify ourselves to the MusicBrainz web service.
musicbrainzngs.set_useragent('beets', beets.__version__,
                             'http://beets.radbox.org/')
class MusicBrainzAPIError(util.HumanReadableException):
    """Raised when a call to the MusicBrainz web service fails.

    ``query`` holds the parameter of the failed action and may be of any
    type; ``reason``/``verb``/``tb`` are handled by the base class.
    """
    def __init__(self, reason, verb, query, tb=None):
        self.query = query
        super(MusicBrainzAPIError, self).__init__(reason, verb, tb)

    def get_message(self):
        # Include the offending query in the human-readable message.
        query_repr = repr(self.query)
        return u'"{0}" in {1} with query {2}'.format(
            self._reasonstr(), self.verb, query_repr
        )
log = logging.getLogger('beets')

# Additional entities to request when fetching a release or recording by ID.
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
                    'labels', 'artist-credits']
TRACK_INCLUDES = ['artists']

# python-musicbrainz-ngs search functions: tolerate different API versions
# by aliasing whichever pair of search functions the installed library has.
if hasattr(musicbrainzngs, 'release_search'):
    # Old API names.
    _mb_release_search = musicbrainzngs.release_search
    _mb_recording_search = musicbrainzngs.recording_search
else:
    # New API names.
    _mb_release_search = musicbrainzngs.search_releases
    _mb_recording_search = musicbrainzngs.search_recordings
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
"""
artist_parts = []
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, basestring):
# Join phrase.
artist_parts.append(el)
artist_credit_parts.append(el)
artist_sort_parts.append(el)
else:
# An artist.
cur_artist_name = el['artist']['name']
artist_parts.append(cur_artist_name)
# Artist sort name.
if 'sort-name' in el['artist']:
artist_sort_parts.append(el['artist']['sort-name'])
else:
artist_sort_parts.append(cur_artist_name)
# Artist credit.
if 'name' in el:
artist_credit_parts.append(el['name'])
else:
artist_credit_parts.append(cur_artist_name)
return (
''.join(artist_parts),
''.join(artist_sort_parts),
''.join(artist_credit_parts),
)
def track_info(recording, index=None, medium=None, medium_index=None):
    """Translate a MusicBrainz recording result dictionary into a beets
    ``TrackInfo`` object.

    The three optional parameters apply only to tracks that appear on
    releases (non-singletons): ``index`` is the overall track number,
    ``medium`` the disc number and ``medium_index`` the track's index on
    its medium. Each number is a 1-based index.
    """
    info = beets.autotag.hooks.TrackInfo(
        recording['title'],
        recording['id'],
        index=index,
        medium=medium,
        medium_index=medium_index,
    )

    credit = recording.get('artist-credit')
    if credit:
        # Joined artist names.
        info.artist, info.artist_sort, info.artist_credit = \
            _flatten_artist_credit(credit)
        # The ID comes from the first credited artist.
        info.artist_id = credit[0]['artist']['id']

    length = recording.get('length')
    if length:
        # MusicBrainz reports milliseconds; beets stores seconds.
        info.length = int(length) / (1000.0)

    return info
def _set_date_str(info, date_str):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
setattr(info, key, date_num)
def album_info(release):
    """Takes a MusicBrainz release result dictionary and returns a beets
    AlbumInfo object containing the interesting data about that release.

    The release dict must contain 'artist-credit', 'medium-list', 'title',
    'id' and 'release-group'; all other fields are optional.
    """
    # Get artist name using join phrases.
    artist_name, artist_sort_name, artist_credit_name = \
        _flatten_artist_credit(release['artist-credit'])

    # Basic info: flatten all media into a single track list, numbering
    # tracks sequentially across discs.
    track_infos = []
    index = 0
    for medium in release['medium-list']:
        disctitle = medium.get('title')
        for track in medium['track-list']:
            index += 1
            ti = track_info(track['recording'],
                            index,
                            int(medium['position']),
                            int(track['position']))
            if track.get('title'):
                # Track title may be distinct from underlying recording
                # title.
                ti.title = track['title']
            ti.disctitle = disctitle
            track_infos.append(ti)
    info = beets.autotag.hooks.AlbumInfo(
        release['title'],
        release['id'],
        artist_name,
        release['artist-credit'][0]['artist']['id'],
        track_infos,
        mediums=len(release['medium-list']),
        artist_sort=artist_sort_name,
        artist_credit=artist_credit_name,
    )
    # Flag compilation-style releases credited to "Various Artists".
    info.va = info.artist_id == VARIOUS_ARTISTS_ID
    info.asin = release.get('asin')
    info.releasegroup_id = release['release-group']['id']
    info.albumdisambig = release['release-group'].get('disambiguation')
    info.country = release.get('country')
    info.albumstatus = release.get('status')

    # Release type not always populated.
    if 'type' in release['release-group']:
        reltype = release['release-group']['type']
        if reltype:
            info.albumtype = reltype.lower()

    # Release date.
    if 'first-release-date' in release['release-group']:
        # Try earliest release date for the entire group first.
        _set_date_str(info, release['release-group']['first-release-date'])
    elif 'date' in release:
        # Fall back to release-specific date.
        _set_date_str(info, release['date'])

    # Label name (the MusicBrainz placeholder '[no label]' is dropped).
    if release.get('label-info-list'):
        label_info = release['label-info-list'][0]
        if label_info.get('label'):
            label = label_info['label']['name']
            if label != '[no label]':
                info.label = label
        info.catalognum = label_info.get('catalog-number')

    # Text representation data.
    if release.get('text-representation'):
        rep = release['text-representation']
        info.script = rep.get('script')
        info.language = rep.get('language')

    # Media (format): taken from the first medium.
    if release['medium-list']:
        first_medium = release['medium-list'][0]
        info.media = first_medium.get('format')

    return info
def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
    """Search MusicBrainz for a single album ("release" in MusicBrainz
    parlance) and generate AlbumInfo objects for the matches. May raise a
    MusicBrainzAPIError.

    The query consists of an artist name, an album name, and, optionally,
    the number of tracks on the album. A ``None`` artist triggers a
    Various Artists search.
    """
    # Build search criteria.
    criteria = {'release': album.lower()}
    if artist is None:
        # Various Artists search.
        criteria['arid'] = VARIOUS_ARTISTS_ID
    else:
        criteria['artist'] = artist.lower()
    if tracks is not None:
        criteria['tracks'] = str(tracks)

    # Abort if we have no search terms at all.
    if not any(criteria.itervalues()):
        return

    try:
        response = _mb_release_search(limit=limit, **criteria)
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, 'release search', criteria,
                                  traceback.format_exc())

    for release in response['release-list']:
        # Search results omit some data (namely, the tracks), so re-fetch
        # each release by ID to get the full record.
        albuminfo = album_for_id(release['id'])
        if albuminfo is not None:
            yield albuminfo
def match_track(artist, title, limit=SEARCH_LIMIT):
    """Search MusicBrainz for a single track and generate TrackInfo
    objects for the matches. May raise a MusicBrainzAPIError.
    """
    criteria = {'artist': artist.lower(), 'recording': title.lower()}

    # Nothing to search for: both terms empty.
    if not any(criteria.itervalues()):
        return

    try:
        response = _mb_recording_search(limit=limit, **criteria)
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, 'recording search', criteria,
                                  traceback.format_exc())

    for recording in response['recording-list']:
        yield track_info(recording)
def album_for_id(albumid):
    """Fetch an album by its MusicBrainz ID.

    Returns an AlbumInfo object, or None when the ID matches nothing.
    Other service failures raise MusicBrainzAPIError.
    """
    try:
        response = musicbrainzngs.get_release_by_id(albumid,
                                                    RELEASE_INCLUDES)
    except musicbrainzngs.ResponseError:
        # Unknown or malformed ID: report "not found" rather than error.
        log.debug('Album ID match failed.')
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, 'get release by ID', albumid,
                                  traceback.format_exc())
    return album_info(response['release'])
def track_for_id(trackid):
    """Fetch a track by its MusicBrainz ID.

    Returns a TrackInfo object, or None when the ID matches nothing.
    Other service failures raise MusicBrainzAPIError.
    """
    try:
        response = musicbrainzngs.get_recording_by_id(trackid,
                                                      TRACK_INCLUDES)
    except musicbrainzngs.ResponseError:
        # Unknown or malformed ID: report "not found" rather than error.
        log.debug('Track ID match failed.')
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, 'get recording by ID', trackid,
                                  traceback.format_exc())
    return track_info(response['recording'])
| |
"""Tests for distutils.command.sdist."""
import os
import unittest
import shutil
import zipfile
from os.path import join
import sys
import tempfile
import warnings
from test.support import captured_stdout, check_warnings, run_unittest
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
from distutils.tests.test_config import PyPIRCCommandTestCase
from distutils.errors import DistutilsExecError, DistutilsOptionError
from distutils.spawn import find_executable
from distutils.tests import support
from distutils.log import WARN
from distutils.archive_util import ARCHIVE_FORMATS
SETUP_PY = """
from distutils.core import setup
import somecode
setup(name='fake')
"""
MANIFEST = """\
# file GENERATED by distutils, do NOT edit
README
inroot.txt
setup.py
data%(sep)sdata.dt
scripts%(sep)sscript.py
some%(sep)sfile.txt
some%(sep)sother_file.txt
somecode%(sep)s__init__.py
somecode%(sep)sdoc.dat
somecode%(sep)sdoc.txt
"""
class SDistTestCase(PyPIRCCommandTestCase):
def setUp(self):
# PyPIRCCommandTestCase creates a temp dir already
# and put it in self.tmp_dir
super(SDistTestCase, self).setUp()
# setting up an environment
self.old_path = os.getcwd()
os.mkdir(join(self.tmp_dir, 'somecode'))
os.mkdir(join(self.tmp_dir, 'dist'))
# a package, and a README
self.write_file((self.tmp_dir, 'README'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#')
self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY)
os.chdir(self.tmp_dir)
def tearDown(self):
# back to normal
os.chdir(self.old_path)
super(SDistTestCase, self).tearDown()
def get_cmd(self, metadata=None):
"""Returns a cmd"""
if metadata is None:
metadata = {'name': 'fake', 'version': '1.0',
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'}
dist = Distribution(metadata)
dist.script_name = 'setup.py'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.dist_dir = 'dist'
def _warn(*args):
pass
cmd.warn = _warn
return dist, cmd
def test_prune_file_list(self):
# this test creates a package with some vcs dirs in it
# and launch sdist to make sure they get pruned
# on all systems
# creating VCS directories with some files in them
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
self.write_file((self.tmp_dir, 'somecode', '.hg',
'ok'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
self.write_file((self.tmp_dir, 'somecode', '.git',
'ok'), 'xxx')
# now building a sdist
dist, cmd = self.get_cmd()
# zip is available universally
# (tar might not be installed under win32)
cmd.formats = ['zip']
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything has been pruned correctly
self.assertEqual(len(content), 4)
def test_make_distribution(self):
# check if tar and gzip are installed
if (find_executable('tar') is None or
find_executable('gzip') is None):
return
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar then a tar
cmd.formats = ['gztar', 'tar']
cmd.ensure_finalized()
cmd.run()
# making sure we have two files
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'] )
os.remove(join(dist_folder, 'fake-1.0.tar'))
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
# now trying a tar then a gztar
cmd.formats = ['tar', 'gztar']
cmd.ensure_finalized()
cmd.run()
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
def test_add_defaults(self):
# http://bugs.python.org/issue2279
# add_default should also include
# data_files and package_data
dist, cmd = self.get_cmd()
# filling data_files by pointing files
# in package_data
dist.package_data = {'': ['*.cfg', '*.dat'],
'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
# adding some data in data_files
data_dir = join(self.tmp_dir, 'data')
os.mkdir(data_dir)
self.write_file((data_dir, 'data.dt'), '#')
some_dir = join(self.tmp_dir, 'some')
os.mkdir(some_dir)
self.write_file((self.tmp_dir, 'inroot.txt'), '#')
self.write_file((some_dir, 'file.txt'), '#')
self.write_file((some_dir, 'other_file.txt'), '#')
dist.data_files = [('data', ['data/data.dt',
'inroot.txt',
'notexisting']),
'some/file.txt',
'some/other_file.txt']
# adding a script
script_dir = join(self.tmp_dir, 'scripts')
os.mkdir(script_dir)
self.write_file((script_dir, 'script.py'), '#')
dist.scripts = [join('scripts', 'script.py')]
cmd.formats = ['zip']
cmd.use_defaults = True
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything was added
self.assertEqual(len(content), 11)
# checking the MANIFEST
f = open(join(self.tmp_dir, 'MANIFEST'))
try:
manifest = f.read()
self.assertEqual(manifest, MANIFEST % {'sep': os.sep})
finally:
f.close()
def test_metadata_check_option(self):
# testing the `medata-check` option
dist, cmd = self.get_cmd(metadata={})
# this should raise some warnings !
# with the `check` subcommand
cmd.ensure_finalized()
cmd.run()
warnings = self.get_logs(WARN)
self.assertEqual(len(warnings), 2)
# trying with a complete set of metadata
self.clear_logs()
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.metadata_check = 0
cmd.run()
warnings = self.get_logs(WARN)
self.assertEqual(len(warnings), 0)
def test_check_metadata_deprecated(self):
# makes sure make_metadata is deprecated
dist, cmd = self.get_cmd()
with check_warnings() as w:
warnings.simplefilter("always")
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_show_formats(self):
with captured_stdout() as stdout:
show_formats()
# the output should be a header line + one line per format
num_formats = len(ARCHIVE_FORMATS.keys())
output = [line for line in stdout.getvalue().split('\n')
if line.strip().startswith('--formats=')]
self.assertEqual(len(output), num_formats)
def test_finalize_options(self):
dist, cmd = self.get_cmd()
cmd.finalize_options()
# default options set by finalize
self.assertEqual(cmd.manifest, 'MANIFEST')
self.assertEqual(cmd.template, 'MANIFEST.in')
self.assertEqual(cmd.dist_dir, 'dist')
# formats has to be a string splitable on (' ', ',') or
# a stringlist
cmd.formats = 1
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.formats = ['zip']
cmd.finalize_options()
# formats has to be known
cmd.formats = 'supazipa'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
def test_get_file_list(self):
# make sure MANIFEST is recalculated
dist, cmd = self.get_cmd()
# filling data_files by pointing files in package_data
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(len(manifest), 5)
# adding a file
self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
# make sure build_py is reinitinialized, like a fresh run
build_py = dist.get_command_obj('build_py')
build_py.finalized = False
build_py.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest2 = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
# do we have the new file in MANIFEST ?
self.assertEqual(len(manifest2), 6)
self.assertIn('doc2.txt', manifest2[-1])
def test_manifest_marker(self):
    """Auto-generated MANIFESTs start with the distutils marker comment."""
    _, cmd = self.get_cmd()
    cmd.ensure_finalized()
    cmd.run()
    f = open(cmd.manifest)
    try:
        lines = [ln.strip() for ln in f.read().split('\n') if ln.strip() != '']
    finally:
        f.close()
    self.assertEqual(
        lines[0], '# file GENERATED by distutils, do NOT edit')
def test_manual_manifest(self):
    """A hand-written MANIFEST (no marker) must be left alone by run()."""
    _, cmd = self.get_cmd()
    cmd.ensure_finalized()
    self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
    cmd.run()
    f = open(cmd.manifest)
    try:
        content = [ln.strip() for ln in f.read().split('\n') if ln.strip() != '']
    finally:
        f.close()
    self.assertEqual(content, ['README.manual'])
def test_suite():
    """Return the suite of sdist tests (entry point for run_unittest)."""
    return unittest.makeSuite(SDistTestCase)


if __name__ == "__main__":
    run_unittest(test_suite())
| |
from __future__ import division, print_function, absolute_import
import numpy as np
import numpy.testing as npt
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.integrate import IntegrationWarning
from scipy import stats
from scipy.special import betainc
from. common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_entropy_vect_scale,
check_edge_support, check_named_args,
check_random_state_property,
check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
check_pickling, check_rvs_broadcast)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Note that you need to add new distributions you want tested
# to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
# Last four of these fail all around. Need to be checked
distcont_extra = [
['betaprime', (100, 86)],
['fatiguelife', (5,)],
['mielke', (4.6420495492121487, 0.59707419545516938)],
['invweibull', (0.58847112119264788,)],
# burr: sample mean test fails still for c<1
['burr', (0.94839838075366045, 4.3820284068855795)],
# genextreme: sample mean test, sf-logsf test fail
['genextreme', (3.3184017469423535,)],
]
distslow = ['kappa4', 'rdist', 'gausshyper',
'recipinvgauss', 'ksone', 'genexpon',
'vonmises', 'vonmises_line', 'mielke', 'semicircular',
'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
# distslow are sorted by speed (very slow to slow)
# These distributions fail the complex derivative test below.
# Here 'fail' mean produce wrong results and/or raise exceptions, depending
# on the implementation details of corresponding special functions.
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
fails_cmplx = set(['beta', 'betaprime', 'chi', 'chi2', 'dgamma', 'dweibull',
'erlang', 'f', 'gamma', 'gausshyper', 'gengamma',
'gennorm', 'genpareto', 'halfgennorm', 'invgamma',
'ksone', 'kstwobign', 'levy_l', 'loggamma', 'logistic',
'maxwell', 'nakagami', 'ncf', 'nct', 'ncx2', 'norminvgauss',
'pearson3', 'rice', 't', 'skewnorm', 'tukeylambda',
'vonmises', 'vonmises_line', 'rv_histogram_instance'])
_h = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
histogram_test_instance = stats.rv_histogram(_h)
def cases_test_cont_basic():
    """Yield (distname, arg) cases, marking slow distributions for pytest."""
    all_cases = distcont[:] + [(histogram_test_instance, tuple())]
    for distname, arg in all_cases:
        if distname == 'levy_stable':
            # levy_stable only implements rvs(); tested separately.
            continue
        if distname in distslow:
            yield pytest.param(distname, arg, marks=pytest.mark.slow)
        else:
            yield distname, arg
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
def test_cont_basic(distname, arg):
    # Smoke-test the full public API of each continuous distribution:
    # sampling, cdf/ppf/sf/isf/pdf round-trips, entropy, pickling, dtypes.
    # this test skips slow distributions
    if distname == 'truncnorm':
        pytest.xfail(reason=distname)
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # The histogram case passes the frozen instance, not a name.
        distfn = distname
        distname = 'rv_histogram_instance'
    np.random.seed(765456)
    sn = 500
    with suppress_warnings() as sup:
        # frechet_l and frechet_r are deprecated, so all their
        # methods generate DeprecationWarnings.
        sup.filter(category=DeprecationWarning, message=".*frechet_")
        rvs = distfn.rvs(size=sn, *arg)
        sm = rvs.mean()
        sv = rvs.var()
        m, v = distfn.stats(*arg)

        check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, distname + 'sample mean test')
        check_cdf_ppf(distfn, arg, distname)
        check_sf_isf(distfn, arg, distname)
        check_pdf(distfn, arg, distname)
        check_pdf_logpdf(distfn, arg, distname)
        check_cdf_logcdf(distfn, arg, distname)
        check_sf_logsf(distfn, arg, distname)

        alpha = 0.01
        if distname == 'rv_histogram_instance':
            check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
        else:
            check_distribution_rvs(distname, arg, alpha, rvs)

        locscale_defaults = (0, 1)
        meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
                  'pareto': 1.5, 'tukeylambda': 0.3,
                  'rv_histogram_instance': 5.0}
        x = spec_x.get(distname, 0.5)
        if distname == 'invweibull':
            arg = (1,)
        elif distname == 'ksone':
            arg = (3,)
        check_named_args(distfn, x, arg, locscale_defaults, meths)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)

        # Entropy
        if distname not in ['ksone', 'kstwobign']:
            check_entropy(distfn, arg, distname)

        if distfn.numargs == 0:
            check_vecentropy(distfn, arg)

        if (distfn.__class__._entropy != stats.rv_continuous._entropy
                and distname != 'vonmises'):
            check_private_entropy(distfn, arg, stats.rv_continuous)

        with suppress_warnings() as sup:
            sup.filter(IntegrationWarning, "The occurrence of roundoff error")
            sup.filter(IntegrationWarning, "Extremely bad integrand")
            sup.filter(RuntimeWarning, "invalid value")
            check_entropy_vect_scale(distfn, arg)

        check_edge_support(distfn, arg)

        check_meth_dtype(distfn, arg, meths)
        check_ppf_dtype(distfn, arg)

        if distname not in fails_cmplx:
            check_cmplx_deriv(distfn, arg)

        if distname != 'truncnorm':
            check_ppf_private(distfn, arg, distname)
def test_levy_stable_random_state_property():
    # levy_stable only implements rvs(), so it is skipped in the
    # main loop in test_cont_basic(). Here we apply just the test
    # check_random_state_property to levy_stable.
    check_random_state_property(stats.levy_stable, (0.5, 0.1))
def cases_test_moments():
    """Yield (distname, arg, normalization_ok, higher_ok, is_xfailing)."""
    fail_normalization = set(['vonmises', 'ksone'])
    fail_higher = set(['vonmises', 'ksone', 'ncf'])

    for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
        if distname == 'levy_stable':
            continue
        norm_ok = distname not in fail_normalization
        higher_ok = distname not in fail_higher
        yield distname, arg, norm_ok, higher_ok, False
        if not (norm_ok and higher_ok):
            # Run the distributions that have issues twice, once skipping the
            # not_ok parts, once with the not_ok parts but marked as knownfail
            yield pytest.param(distname, arg, True, True, True,
                               marks=pytest.mark.xfail)
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,is_xfailing',
                         cases_test_moments())
def test_moments(distname, arg, normalization_ok, higher_ok, is_xfailing):
    # Verify mvsk moments by numerical integration; the *_ok flags skip
    # checks a distribution is known to fail (see cases_test_moments).
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'rv_histogram_instance'
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning,
                   "The integral is probably divergent, or slowly convergent.")
        sup.filter(category=DeprecationWarning, message=".*frechet_")
        if is_xfailing:
            # Known-failing re-run: silence all integration noise.
            sup.filter(IntegrationWarning)

        m, v, s, k = distfn.stats(*arg, moments='mvsk')

        if normalization_ok:
            check_normalization(distfn, arg, distname)

        if higher_ok:
            check_mean_expect(distfn, arg, m, distname)
            check_skew_expect(distfn, arg, m, v, s, distname)
            check_var_expect(distfn, arg, m, v, distname)
            check_kurt_expect(distfn, arg, m, v, k, distname)

        check_loc_scale(distfn, arg, m, v, distname)
        check_moment(distfn, arg, m, v, distname)
@pytest.mark.parametrize('dist,shape_args', distcont)
def test_rvs_broadcast(dist, shape_args):
    # Check that rvs() broadcasts loc/scale/shape arguments correctly.
    if dist in ['gausshyper', 'genexpon']:
        pytest.skip("too slow")

    # If shape_only is True, it means the _rvs method of the
    # distribution uses more than one random number to generate a random
    # variate.  That means the result of using rvs with broadcasting or
    # with a nontrivial size will not necessarily be the same as using the
    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
    # of the results, not the values.
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement.  If
    # the implementation the rvs() method of a distribution changes, this
    # test might also have to be changed.
    shape_only = dist in ['betaprime', 'dgamma', 'exponnorm', 'norminvgauss',
                          'nct', 'dweibull', 'rice', 'levy_stable', 'skewnorm']

    distfunc = getattr(stats, dist)
    loc = np.zeros(2)
    scale = np.ones((3, 1))
    nargs = distfunc.numargs
    allargs = []
    bshape = [3, 2]
    # Generate shape parameter arguments with mutually broadcastable,
    # distinct leading dimensions (k+4) so the result shape is unambiguous.
    for k in range(nargs):
        shp = (k + 4,) + (1,)*(k + 2)
        allargs.append(shape_args[k]*np.ones(shp))
        bshape.insert(0, k + 4)
    allargs.extend([loc, scale])
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.

    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
def test_rvs_gh2069_regression():
    """Regression test for gh-2069: rvs() with broadcast loc/scale must not
    repeat a single variate (scipy <= 0.17 did).

    A typical example of the broken behavior:
    >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
    array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
    """
    np.random.seed(123)

    def assert_nontrivial(sample):
        # Any repeated consecutive value would reproduce the bug.
        deltas = np.diff(np.ravel(sample))
        npt.assert_(np.all(deltas != 0),
                    "All the values are equal, but they shouldn't be!")

    assert_nontrivial(stats.norm.rvs(loc=np.zeros(5), scale=1))
    assert_nontrivial(stats.norm.rvs(loc=0, scale=np.ones(5)))
    assert_nontrivial(stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5)))
    assert_nontrivial(stats.norm.rvs(loc=np.array([[0], [0]]),
                                     scale=np.ones(5)))

    # Incompatibly broadcast arguments must raise.
    assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
                  [[1, 1], [1, 1]], 1)
    assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
    assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
                  [[1], [2]], (4,))
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
    """Test sample mean/variance against the theoretical moments,
    skipping whichever theoretical moment is not finite."""
    # this did not work, skipped silently by nose
    finite_mean = np.isfinite(m)
    finite_var = np.isfinite(v)
    if finite_mean:
        check_sample_mean(sm, sv, sn, m)
    if finite_var:
        check_sample_var(sv, sn, v)
def check_sample_mean(sm, v, n, popmean):
    """One-sample two-tailed t-test that sample mean *sm* equals *popmean*.

    Mirrors stats.stats.ttest_1samp: the t statistic is built from the
    sample variance *v* and sample size *n*; the p-value comes from the
    regularized incomplete beta function.
    """
    df = n - 1
    svar = ((n - 1) * v) / float(df)  # looks redundant
    t_stat = (sm - popmean) / np.sqrt(svar * (1.0 / n))
    prob = betainc(0.5 * df, 0.5, df / (df + t_stat * t_stat))
    # Fail when the two-tailed probability drops below 1%.
    npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
                (t_stat, prob, popmean, sm))
def check_sample_var(sv, n, popvar):
    """Two-sided chi-square test that the sample variance *sv* of *n*
    observations is consistent with the hypothesized variance *popvar*.

    Raises AssertionError (via npt.assert_) when the two-sided p-value
    drops below 1%.
    """
    df = n - 1
    # Under H0, (n-1)*sv/popvar ~ chi2(n-1).  BUG FIX: the original
    # computed (n-1)*popvar/float(popvar), which is always df, so the
    # statistic ignored sv entirely and the test could never fail.
    chi2 = (n - 1) * sv / float(popvar)
    pval = stats.distributions.chi2.sf(chi2, df) * 2
    npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
                (chi2, pval, popvar, sv))
def check_cdf_ppf(distfn, arg, msg):
    """Check that cdf(ppf(q)) round-trips at low, mid and high quantiles."""
    quantiles = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
def check_sf_isf(distfn, arg, msg):
    """Check the sf(isf(q)) round-trip and the cdf == 1 - sf identity."""
    quantiles = [0.1, 0.5, 0.9]
    roundtrip = distfn.sf(distfn.isf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - sf-isf roundtrip')
    points = [0.1, 0.9]
    npt.assert_almost_equal(distfn.cdf(points, *arg),
                            1.0 - distfn.sf(points, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
    """Compare the pdf at the median against a central difference of the
    cdf (step 1e-6), nudging away from the median when the pdf there is
    near zero or singular."""
    eps = 1e-6
    median = distfn.ppf(0.5, *arg)
    pdfv = distfn.pdf(median, *arg)
    if (pdfv < 1e-4) or (pdfv > 1e4):
        # avoid checking a case where pdf is close to zero or
        # huge (singularity)
        median = median + 0.1
        pdfv = distfn.pdf(median, *arg)
    cdfdiff = (distfn.cdf(median + eps, *arg) -
               distfn.cdf(median - eps, *arg)) / eps / 2.0
    # replace with better diff and better test (more points),
    # actually, this works pretty well
    npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
    """Check log(pdf) == logpdf at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    pdf_vals = distfn.pdf(xs, *args)
    logpdf_vals = distfn.logpdf(xs, *args)
    # Drop zero densities and non-finite log values before comparing.
    pdf_vals = pdf_vals[pdf_vals != 0]
    logpdf_vals = logpdf_vals[np.isfinite(logpdf_vals)]
    npt.assert_almost_equal(np.log(pdf_vals), logpdf_vals, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
    """Check log(sf) == logsf at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    sf_vals = distfn.sf(xs, *args)
    logsf_vals = distfn.logsf(xs, *args)
    # Drop zero survival values and non-finite log values before comparing.
    sf_vals = sf_vals[sf_vals != 0]
    logsf_vals = logsf_vals[np.isfinite(logsf_vals)]
    npt.assert_almost_equal(np.log(sf_vals), logsf_vals, decimal=7,
                            err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
    """Check log(cdf) == logcdf at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    cdf_vals = distfn.cdf(xs, *args)
    logcdf_vals = distfn.logcdf(xs, *args)
    # Drop zero cdf values and non-finite log values before comparing.
    cdf_vals = cdf_vals[cdf_vals != 0]
    logcdf_vals = logcdf_vals[np.isfinite(logcdf_vals)]
    npt.assert_almost_equal(np.log(cdf_vals), logcdf_vals, decimal=7,
                            err_msg=msg + " - logcdf-log(cdf) relationship")
def check_distribution_rvs(dist, args, alpha, rvs):
    """Kolmogorov-Smirnov test of *rvs* against distribution *dist*.

    Reuses the given sample; on a failure at level *alpha* it retries once
    with a freshly drawn sample to avoid spurious random failures.
    (test from scipy.stats.tests)
    """
    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
    if pval < alpha:
        D, pval = stats.kstest(dist, '', args=args, N=1000)
    npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
                "; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
    """The vectorized entropy must agree with the private _entropy."""
    vectorized = distfn.vecentropy(*args)
    private = distfn._entropy(*args)
    npt.assert_equal(vectorized, private)
def check_loc_scale(distfn, arg, m, v, msg):
    """Mean and variance must transform affinely under loc/scale:
    mean -> m*scale + loc, var -> v*scale**2."""
    loc, scale = 10.0, 10.0
    mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
    npt.assert_allclose(m * scale + loc, mt)
    npt.assert_allclose(v * scale * scale, vt)
def check_ppf_private(distfn, arg, msg):
    """_ppf must not return NaN at interior quantiles.
    (Fails by design for truncnorm: self.nb not defined.)"""
    quantiles = np.array([0.1, 0.5, 0.9])
    vals = distfn._ppf(quantiles, *arg)
    npt.assert_(not np.any(np.isnan(vals)), msg + 'ppf private is nan')
| |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@ecdsa.org
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import util
from bitcoin import *
# Easiest allowed proof-of-work target (what get_target() returns for
# period 0, paired with compact bits 0x1d00ffff).
MAX_TARGET = 0x00000000FFFF0000000000000000000000000000000000000000000000000000


class Blockchain(util.PrintError):
    '''Manages blockchain headers and their verification'''

    # NOTE(review): this module is Python 2 only (str.encode('hex'),
    # integer '/' division, urllib.urlretrieve).

    def __init__(self, config, network):
        self.config = config
        self.network = network
        # Bootstrap source for the flat headers file (fetched on first init).
        self.headers_url = "https://headers.electrum.org/blockchain_headers"
        self.local_height = 0
        self.set_local_height()

    def height(self):
        # Height of the last header stored locally.
        return self.local_height

    def init(self):
        """Ensure the headers file exists and refresh the cached height."""
        self.init_headers_file()
        self.set_local_height()
        self.print_error("%d blocks" % self.local_height)

    def verify_header(self, header, prev_header, bits, target):
        """Assert that *header* links to *prev_header*, carries the expected
        difficulty *bits*, and that its hash satisfies *target*."""
        prev_hash = self.hash_header(prev_header)
        assert prev_hash == header.get('prev_block_hash'), "prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash'))
        assert bits == header.get('bits'), "bits mismatch: %s vs %s" % (bits, header.get('bits'))
        _hash = self.hash_header(header)
        assert int('0x' + _hash, 16) <= target, "insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target)

    def verify_chain(self, chain):
        """Verify a list of consecutive headers against the stored chain.

        `height / 2016` (Python 2 integer division) selects the
        retargeting period the header belongs to."""
        first_header = chain[0]
        prev_header = self.read_header(first_header.get('block_height') - 1)
        for header in chain:
            height = header.get('block_height')
            bits, target = self.get_target(height / 2016, chain)
            self.verify_header(header, prev_header, bits, target)
            prev_header = header

    def verify_chunk(self, index, data):
        """Verify a raw chunk of serialized headers (80 bytes each)."""
        num = len(data) / 80  # Python 2 integer division
        prev_header = None
        if index != 0:
            # Link the chunk's first header to the last stored one.
            prev_header = self.read_header(index*2016 - 1)
        bits, target = self.get_target(index)
        for i in range(num):
            raw_header = data[i*80:(i+1) * 80]
            header = self.deserialize_header(raw_header)
            self.verify_header(header, prev_header, bits, target)
            prev_header = header

    def serialize_header(self, res):
        """Serialize a header dict to its 160-character hex form."""
        s = int_to_hex(res.get('version'), 4) \
            + rev_hex(res.get('prev_block_hash')) \
            + rev_hex(res.get('merkle_root')) \
            + int_to_hex(int(res.get('timestamp')), 4) \
            + int_to_hex(int(res.get('bits')), 4) \
            + int_to_hex(int(res.get('nonce')), 4)
        return s

    def deserialize_header(self, s):
        """Parse an 80-byte raw header (Python 2 str) into a dict."""
        # Little-endian byte string -> int.
        hex_to_int = lambda s: int('0x' + s[::-1].encode('hex'), 16)
        h = {}
        h['version'] = hex_to_int(s[0:4])
        h['prev_block_hash'] = hash_encode(s[4:36])
        h['merkle_root'] = hash_encode(s[36:68])
        h['timestamp'] = hex_to_int(s[68:72])
        h['bits'] = hex_to_int(s[72:76])
        h['nonce'] = hex_to_int(s[76:80])
        return h

    def hash_header(self, header):
        """Hex hash of a header dict; the predecessor of the genesis block
        (None) hashes to 64 zeros."""
        if header is None:
            return '0' * 64
        return hash_encode(Hash(self.serialize_header(header).decode('hex')))

    def path(self):
        # Location of the flat headers file on disk.
        return util.get_headers_path(self.config)

    def init_headers_file(self):
        """Create the headers file, bootstrapping from headers_url when
        possible; fall back to creating an empty file on any error."""
        filename = self.path()
        if os.path.exists(filename):
            return
        try:
            import urllib, socket
            socket.setdefaulttimeout(30)
            self.print_error("downloading ", self.headers_url)
            # Download to a temp name and rename so a partial download
            # never masquerades as a complete headers file.
            urllib.urlretrieve(self.headers_url, filename + '.tmp')
            os.rename(filename + '.tmp', filename)
            self.print_error("done.")
        except Exception:
            self.print_error("download failed. creating file", filename)
            open(filename, 'wb+').close()

    def save_chunk(self, index, chunk):
        """Write a chunk of headers at its byte offset (2016 headers of
        80 bytes per period) and refresh the cached height."""
        filename = self.path()
        f = open(filename, 'rb+')
        f.seek(index * 2016 * 80)
        h = f.write(chunk)  # NOTE(review): return value unused
        f.close()
        self.set_local_height()

    def save_header(self, header):
        """Write a single 80-byte header at its height offset."""
        data = self.serialize_header(header).decode('hex')
        assert len(data) == 80
        height = header.get('block_height')
        filename = self.path()
        f = open(filename, 'rb+')
        f.seek(height * 80)
        h = f.write(data)  # NOTE(review): return value unused
        f.close()
        self.set_local_height()

    def set_local_height(self):
        """Derive self.local_height from the headers file size."""
        name = self.path()
        if os.path.exists(name):
            h = os.path.getsize(name)/80 - 1
            if self.local_height != h:
                self.local_height = h

    def read_header(self, block_height):
        """Read and deserialize the header at *block_height*.

        Returns None (implicitly) when the file is missing or too short."""
        name = self.path()
        if os.path.exists(name):
            f = open(name, 'rb')
            f.seek(block_height * 80)
            h = f.read(80)
            f.close()
            if len(h) == 80:
                h = self.deserialize_header(h)
                return h

    def get_target(self, index, chain=None):
        """Return (compact bits, target) for retargeting period *index*,
        applying the 2016-block difficulty adjustment rule."""
        if index == 0:
            return 0x1d00ffff, MAX_TARGET
        first = self.read_header((index-1) * 2016)
        last = self.read_header(index*2016 - 1)
        if last is None:
            # Boundary header not on disk yet: look it up in the
            # in-memory chain currently being connected.
            for h in chain:
                if h.get('block_height') == index*2016 - 1:
                    last = h
        assert last is not None
        # bits to target
        bits = last.get('bits')
        bitsN = (bits >> 24) & 0xff
        assert bitsN >= 0x03 and bitsN <= 0x1d, "First part of bits should be in [0x03, 0x1d]"
        bitsBase = bits & 0xffffff
        assert bitsBase >= 0x8000 and bitsBase <= 0x7fffff, "Second part of bits should be in [0x8000, 0x7fffff]"
        target = bitsBase << (8 * (bitsN-3))
        # new target
        nActualTimespan = last.get('timestamp') - first.get('timestamp')
        nTargetTimespan = 14 * 24 * 60 * 60  # two weeks, in seconds
        # Clamp the adjustment to a factor of 4 in either direction.
        nActualTimespan = max(nActualTimespan, nTargetTimespan / 4)
        nActualTimespan = min(nActualTimespan, nTargetTimespan * 4)
        new_target = min(MAX_TARGET, (target*nActualTimespan) / nTargetTimespan)
        # convert new target to bits
        c = ("%064x" % new_target)[2:]
        while c[:2] == '00' and len(c) > 6:
            c = c[2:]
        bitsN, bitsBase = len(c) / 2, int('0x' + c[:6], 16)
        if bitsBase >= 0x800000:
            bitsN += 1
            bitsBase >>= 8
        new_bits = bitsN << 24 | bitsBase
        # Return the target re-derived from the truncated compact bits
        # (not new_target itself) so it matches the bits' precision.
        return new_bits, bitsBase << (8 * (bitsN-3))

    def connect_header(self, chain, header):
        '''Builds a header chain until it connects.  Returns True if it has
        successfully connected, False if verification failed, otherwise the
        height of the next header needed.'''
        chain.append(header)  # Ordered by decreasing height
        previous_height = header['block_height'] - 1
        previous_header = self.read_header(previous_height)

        # Missing header, request it
        if not previous_header:
            return previous_height

        # Does it connect to my chain?
        prev_hash = self.hash_header(previous_header)
        if prev_hash != header.get('prev_block_hash'):
            self.print_error("reorg")
            return previous_height

        # The chain is complete.  Reverse to order by increasing height
        chain.reverse()
        try:
            self.verify_chain(chain)
            self.print_error("new height:", previous_height + len(chain))
            for header in chain:
                self.save_header(header)
            return True
        except BaseException as e:
            self.print_error(str(e))
            return False

    def connect_chunk(self, idx, hexdata):
        """Verify and store a downloaded hex-encoded chunk.

        Returns the next chunk index on success, or idx - 1 on failure so
        the caller re-requests from one chunk earlier."""
        try:
            data = hexdata.decode('hex')
            self.verify_chunk(idx, data)
            self.print_error("validated chunk %d" % idx)
            self.save_chunk(idx, data)
            return idx + 1
        except BaseException as e:
            self.print_error('verify_chunk failed', str(e))
            return idx - 1
| |
"""The Remote (i.e. usually client-side) model."""
import wx
import wx.lib.sized_controls as sc
from wx import ImageFromStream, BitmapFromImage
import cStringIO
from twisted.python import log
from twisted.spread.flavors import NoSuchMethod
from pyrope.model.shared import *
from pyrope.model.events import *
import copy
from zope.interface import Interface, Attribute, implements
class RemoteApplication(pb.Copyable, pb.RemoteCopy):
    """Describes a Pyrope application, with a reference to the application
    handler (pb.Referenceable on the server, pb.RemoteReference on the
    client)."""
    def __init__(self, app):
        # Copy the displayable metadata and keep a handle to the
        # application handler object itself.
        self.name = app.name
        self.description = app.description
        self.server = app
pb.setUnjellyableForClass(RemoteApplication, RemoteApplication)
class WidgetConstructorDetails(pb.Copyable, pb.RemoteCopy):
    """Describes the information needed by the client to create a local
    wxWidget which represents a server-side Pyrope widget.
    It is a U{Parameter Object<http://www.refactoring.com/catalog/introduceParameterObject.html>}"""
    def __init__(self, remoteWidgetReference, type, constructorData, otherData=None, styleData=None, children=None, eventHandlers=None):
        self.remoteWidgetReference = remoteWidgetReference  # server-side widget reference
        self.type = type  # widget type identifier (usage not shown here)
        self.constructorData = constructorData  # kwargs for the wx constructor
        self.otherData = otherData
        self.styleData = styleData  # becomes constructorData["style"] when set
        self.children = children  # nested WidgetConstructorDetails, if any
        self.eventHandlers = eventHandlers  # event names the server listens for
pb.setUnjellyableForClass(WidgetConstructorDetails, WidgetConstructorDetails)
class ClientChangeset(pb.Copyable):
    """Records widget property changes to send to the server.

    Order of changes made is not preserved; only the latest change per
    widget is kept.  clear() should be called after the changeset has
    been sent to the server.
    Changeset entry: widget : (property name, value)
    """
    def __init__(self):
        self.changes = {}

    def addChange(self, widget, propertyName, newValue):
        """Record the latest change for *widget*."""
        self.changes[widget] = (propertyName, newValue)

    def clear(self):
        """Forget all recorded changes."""
        self.changes.clear()

    def isEmpty(self):
        """@return: True if the changes dict has no entries, False otherwise."""
        return not self.changes
class ServerChangeset(pb.RemoteCopy):
    """Server-side RemoteCopy of ClientChangeset.  Call apply() when you want to apply changes."""
    def apply(self):
        """Apply changes in changeset.  Iterates through each change in the changeset and applies them.
        Note: This method assumes Python properties are used to access Pyrope Widget attributes that need to update the client side when changed.
        If the client asked to update property "foo", setting foo directly will cause it to update the client-side again!, so, it assumes
        for every foo there is a "_foo" attribute where the value is stored.  This really should be done in a better way, but for now, it works provided
        widgets stick to this naming convention:
        e.g.
        class TextBox(Window):
            ...
            def _getValue(self):
                return self._value
            def _setValue(self, value):
                self._value = value
                return self.callRemote("setValue", value)
            value = property(_getValue, _setValue)
        """
        # Write the backing "_<name>" attribute directly so the property
        # setter (which would echo the change back to the client) is bypassed.
        for widget, (propName, value) in self.changes.items():
            setattr(widget, "_"+propName, value)
pb.setUnjellyableForClass(ClientChangeset, ServerChangeset)
class PyropeReferenceable(pb.Referenceable):
    """Subclasses pb.Referenceable so that it calls self.widget.somemethod
    when remote_somemethod cannot be found.  This makes it simpler to wrap
    methods on wxWidgets classes."""
    def remoteMessageReceived(self, broker, message, args, kw):
        """Fall back to calling self.widget.<message>() -- with no
        arguments -- when there is no remote_<message> on this object."""
        try:
            return pb.Referenceable.remoteMessageReceived(self, broker, message, args, kw)
        except NoSuchMethod:
            return getattr(self.widget, message)()
def returnWxPythonObject(object):
    """Translate a wxPython return value into something PB can serialize.

    E.g. a wxPoint cannot be sent directly over the network (Twisted
    Perspective Broker won't allow it for security reasons), so a tuple
    with the coordinates is sent instead.  The default behaviour is just
    to return the passed argument.

    BUG FIX: the original looked the converter up with getattr() on the
    function object (nested functions are not attributes, so the default
    was always found) and then neither called the converter nor returned
    anything.
    """
    def returnDefault(obj):
        # Default: pass the value through unchanged.
        return obj

    def returnPoint(obj):
        # wx.Point -> plain (x, y) tuple.
        return (obj.x, obj.y)

    converters = {"returnPoint": returnPoint}
    converter = converters.get("return" + object.__class__.__name__,
                               returnDefault)
    return converter(object)
class WindowReference(PyropeReferenceable):
    """Manages a local wxWindow on behalf of a server-side Pyrope widget."""
    # list of events which indicate that the data has changed in this control
    # e.g. wx.EVT_TEXT for a TextBox.  Subclasses should override this
    # attribute; so things don't break, this class has an empty list.
    changeEvents = []

    def __init__(self, app, widget, remote, handlers):
        self.app = app          # RemoteApplicationHandler
        self.widget = widget    # wxWindow
        self.remote = remote    # server-side Pyrope widget reference
        self.boundEvents = []   # bound Pyrope events, e.g. EventClose
        self.children = []      # references of children
        # Bind all event handlers the server is interested in.
        for event in handlers:
            # NOTE(review): eval() of a server-supplied name -- acceptable
            # only while the server is fully trusted; a whitelist lookup
            # table of event classes would be safer.
            eventClass = eval(event)
            self.boundEvents.append(eventClass)
            self.widget.Bind(eventClass.wxEventClass, self.handleEvent)
        # We need to listen for changes to the widget, so that a changeset
        # can be generated.
        for event in self.changeEvents:
            self.widget.Bind(event, self.recordChange)

    # recording changes
    def recordChange(self, event):
        # Let the application class handle it.
        self.app.recordChange(self, event)

    def getChangeData(self, event):
        """Given an event instance, this method should figure out what
        property should be updated with what data.
        @return: (property name, value)"""
        # TODO: implement something for wxWindow here

    # closing and destroying window
    def remote_Destroy(self):
        self._destroy()

    def _destroy(self):
        self.widget.Destroy()

    def onClose(self, event):
        if CloseEvent in self.boundEvents:
            self.handleEvent(event)
        else:
            # If the programmer hasn't handled the close event specifically,
            # then the default behaviour is to close the form.
            self._destroy()

    # event handling
    def handleEvent(self, event):
        # Let the application class handle it.
        self.app.handleEvent(self, event)

    # other methods
    def remote_Centre(self, direction, centreOnScreen):
        dir = direction
        if centreOnScreen:
            # BUG FIX: the original computed "dir | wx.CENTRE_ON_SCREEN" and
            # discarded the result, so centreOnScreen had no effect.
            dir |= wx.CENTRE_ON_SCREEN
        return self.widget.Centre(direction=dir)

    def remote_ClientToScreen(self, xy):
        # Accepts the (x, y) tuple sent by the server.  Rewritten from the
        # Python-2-only tuple-parameter syntax "def f(self, (x, y))"; the
        # wire interface (one tuple argument) is unchanged.
        x, y = xy
        return self.widget.ClientToScreenXY(x, y)

    def remote_setBackgroundColour(self, colour):
        self.widget.SetBackgroundColour(colour)
#        self.widget.Refresh()
class TopLevelWindowReference(WindowReference):
    def _destroy(self):
        """Check to see if this is the last window open (for this app) and
        if so, call shutdown on the RemoteApplicationHandler instance.
        Finally, destroy the widget."""
        self.app.topLevelWindows.remove(self.widget)
        if not self.app.topLevelWindows:
            # Last top-level window: shut the whole application down.
            self.app.shutdown()
        self.widget.Destroy()
class FrameReference(TopLevelWindowReference):
    """Manages a local frame; adds nothing beyond TopLevelWindowReference."""
    pass
class DialogReference(TopLevelWindowReference):
    """Manages a local dialog."""
    def remote_ShowModal(self):
        # Show the dialog modally and return its wx result code.
        return self.widget.ShowModal()
class TextEntryDialogReference(DialogReference):
    def remote_showModalAndGetValue(self):
        """Show the dialog modally; @return: (result id, entered text)."""
        id = self.widget.ShowModal()
        val = self.widget.GetValue()
        return (id, val)
class TextBoxReference(WindowReference):
    # A text edit marks this widget's data as changed.
    changeEvents = [wx.EVT_TEXT]
    def getChangeData(self, event):
        """@return: ("value", current text) for text-change events."""
        if event.GetEventType() == wx.EVT_TEXT.typeId:
            return ("value", self.widget.GetValue())
    def remote_setValue(self, value):
        # Server-initiated update of the text contents.
        return self.widget.SetValue(value)
class LabelReference(WindowReference):
    def remote_setLabel(self, label):
        # Server-initiated update of the label text.
        return self.widget.SetLabel(label)
class SliderReference(WindowReference):
    # A completed scroll marks this widget's data as changed.
    changeEvents = [wx.EVT_SCROLL_CHANGED]
    def getChangeData(self, event):
        """@return: ("value", slider position) for scroll-change events."""
        # NOTE(review): "wx.wx.EVT_SCROLL_CHANGED" -- elsewhere in this
        # file plain "wx.EVT_*" is used; confirm the wx.wx alias is
        # intentional and not a typo.
        if event.GetEventType() == wx.wx.EVT_SCROLL_CHANGED.typeId:
            return ("value", self.widget.GetValue())
class GaugeReference(WindowReference):
    def remote_getData(self):
        # Current gauge value.
        return self.widget.GetValue()
    def remote_setData(self, data):
        # Server-initiated update of the gauge value.
        return self.widget.SetValue(data)
class ControlWithItemsReference(WindowReference):
    """Base for widgets that hold a list of items (choice, listbox, ...)."""
    def remote_setSelectedIndex(self, index):
        return self.widget.SetSelection(index)
    def remote_setChoices(self, data):
        # Replace all items with *data*, then repaint.
        self.widget.Clear()
        for item in data:
            self.widget.Append(item)
        self.widget.Update()
    def remote_setChoice(self, index, data):
        # Replace the label of the item at *index*.
        self.widget.SetString(index, data)
    def remote_append(self, data):
        self.widget.Append(data)
    def remote_delete(self, index):
        self.widget.Delete(index)
class ChoiceReference(ControlWithItemsReference):
    # A selection change marks this widget's data as changed.
    changeEvents = [wx.EVT_CHOICE]
    def getChangeData(self, event):
        """@return: ("selectedIndex", selection) for choice events."""
        if event.GetEventType() == wx.wx.EVT_CHOICE.typeId:
            return ("selectedIndex", self.widget.GetSelection())
class ListBoxReference(ControlWithItemsReference):
    # A selection change marks this widget's data as changed.
    changeEvents = [wx.EVT_LISTBOX]
    def getChangeData(self, event):
        """@return: ("selectedIndex", selection) for listbox events."""
        if event.GetEventType() == wx.wx.EVT_LISTBOX.typeId:
            return ("selectedIndex", self.widget.GetSelection())
class ListCtrlReference(ControlWithItemsReference):
    # An item-selected event marks this widget's data as changed.
    changeEvents = [wx.EVT_LIST_ITEM_SELECTED]
    def getChangeData(self, event):
        """@return: ("selectedIndex", selection) for item-selected events."""
        if event.GetEventType() == wx.wx.EVT_LIST_ITEM_SELECTED.typeId:
            return ("selectedIndex", self.widget.GetSelection())
class MenuBarReference(WindowReference):
    def onMenu(self, event):
        # Debug trace only; actual menu dispatch happens in
        # MenuItemReference.onMenu (Python 2 print statement).
        print event.GetEventObject()
class MenuItemReference(object):
    """Client-side handler for a single menu item (a plain object, not a
    WindowReference)."""
    def __init__(self, menuBarRef, widget):
        self.menuBarRef = menuBarRef  # owning MenuBarReference
        self.widget = widget          # the wx menu item
    def onMenu(self, event):
        # XXX: hack! move this into RemoteApplicationHandler
        # Piggy-back the app's pending changeset (if any) on the
        # menuItemSelected notification to the server.
        changeset = None
        if self.menuBarRef.app.changeset:
            changeset = self.menuBarRef.app.changeset
        self.menuBarRef.remote.callRemote("menuItemSelected", self.widget.GetId(), changeset)
class StatusBarReference(WindowReference):
    def remote_setFields(self, fields):
        """Resize the status bar to len(fields) fields and set their text;
        *fields* maps field index -> text."""
        self.widget.SetFieldsCount(len(fields))
        for index, text in fields.items():
            self.widget.SetStatusText(text, index)
############
# Builders #
############
class WidgetBuilder(object):
    """Base class for builders that turn a WidgetConstructorDetails into a
    local wx widget plus its client-side reference wrapper.  Subclasses
    provide widgetClass and referenceClass."""
    def replaceParent(self, app, widgetData):
        # Swap the server-side parent reference for the already-created
        # local wx parent; sized containers contribute their contents pane.
        parent = widgetData.constructorData["parent"]
        if parent:
            widget = app.widgets[parent]
            if isinstance(widget, (sc.SizedFrame, sc.SizedDialog)):
                widgetData.constructorData["parent"] = widget.GetContentsPane()
            else:
                widgetData.constructorData["parent"] = widget
    def createLocalReference(self, app, widgetData):
        """Instantiate the wx widget, wrap it in referenceClass, register it
        with the app, and recursively build any children."""
        # XXX: this will break if called from a WidgetBuilder instance!
        if widgetData.styleData:
            widgetData.constructorData["style"] = widgetData.styleData
        window = self.widgetClass(**widgetData.constructorData)
        localRef = self.referenceClass(app, window, widgetData.remoteWidgetReference, widgetData.eventHandlers)
        app.widgets[widgetData.remoteWidgetReference] = localRef.widget
        if widgetData.children:
            for childData in widgetData.children:
                childRef = WidgetFactory.create(app, childData)
                # server needs to know about child reference
                childData.remoteWidgetReference.callRemote("updateRemote", childRef)
                # add to localRef children
                localRef.children.append(childRef)
        return localRef
class TopLevelWindowBuilder(WidgetBuilder):
    """Builder for frames/dialogs: additionally wires close handling and
    registers the window so the app can detect when the last one closes."""
    def createLocalReference(self, app, widgetData):
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        localRef.widget.Bind(wx.EVT_CLOSE, localRef.onClose)
        app.topLevelWindows.append(localRef.widget)
        return localRef
#class FrameBuilder(TopLevelWindowBuilder):
# widgetClass = wx.Frame
# referenceClass = FrameReference
#class MiniFrameBuilder(TopLevelWindowBuilder):
# widgetClass = wx.MiniFrame
# referenceClass = FrameReference
#class DialogBuilder(TopLevelWindowBuilder):
# widgetClass = wx.Dialog
# referenceClass = DialogReference
class DialogBuilder(TopLevelWindowBuilder):
    widgetClass = sc.SizedDialog
    referenceClass = DialogReference
    def replaceParent(self, app, widgetData):
        # Dialogs are parented directly on the mapped window -- no
        # contents-pane indirection (unlike WidgetBuilder.replaceParent).
        parent = widgetData.constructorData["parent"]
        if parent:
            widgetData.constructorData["parent"] = app.widgets[parent]
class MessageDialogBuilder(DialogBuilder):
    # Plain wx.MessageDialog; inherits DialogBuilder's parent handling.
    widgetClass = wx.MessageDialog
    referenceClass = DialogReference
class TextEntryDialogBuilder(DialogBuilder):
    # Text-entry dialog; its reference exposes the entered value.
    widgetClass = wx.TextEntryDialog
    referenceClass = TextEntryDialogReference
class FrameBuilder(TopLevelWindowBuilder):
    widgetClass = sc.SizedFrame
    referenceClass = FrameReference
    def createLocalReference(self, app, widgetData):
        """Build the frame, configure its contents-pane sizer, and attach
        the menu bar if the server supplied one."""
        localRef = TopLevelWindowBuilder.createLocalReference(self, app, widgetData)
        # The sizer type applies to the frame's contents pane.
        widget = localRef.widget.GetContentsPane()
        widget.SetSizerType(widgetData.otherData["sizerType"])
        #create menus
        menuData = widgetData.otherData["menuBar"]
        if menuData:
            menuBarRef = WidgetFactory.create(app, menuData)
            menuData.remoteWidgetReference.callRemote("updateRemote", menuBarRef)
            localRef.widget.SetMenuBar(menuBarRef.widget)
        return localRef
class PanelBuilder(WidgetBuilder):
    widgetClass = sc.SizedPanel
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        """Build the panel and apply the server-specified sizer type."""
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        widget.SetSizerType(widgetData.otherData["sizerType"])
        return localRef
class TextBoxBuilder(WidgetBuilder):
    # Change events are handled generically via TextBoxReference.
    widgetClass = wx.TextCtrl
    referenceClass = TextBoxReference
#    def createLocalReference(self, app, widgetData):
#        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
#        localRef.widget.Bind(wx.EVT_TEXT, localRef.onText)
#        return localRef
class LabelBuilder(WidgetBuilder):
    # Static text; no extra construction steps needed.
    widgetClass = wx.StaticText
    referenceClass = LabelReference
class ButtonBuilder(WidgetBuilder):
    widgetClass = wx.Button
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        """Build the button; honour the server-side "default button" flag."""
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        if widgetData.otherData["default"]:
            widget.SetDefault()
        return localRef
class BitmapButtonBuilder(ButtonBuilder):
    widgetClass = wx.BitmapButton
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        # The image arrives as raw bytes; decode it into a wx bitmap and
        # inject it into the constructor arguments before building.
        stream = cStringIO.StringIO(widgetData.otherData["image"])
        bitmap = BitmapFromImage(ImageFromStream(stream))
        widgetData.constructorData["bitmap"] = bitmap
        return ButtonBuilder.createLocalReference(self, app, widgetData)
class ChoiceBuilder(WidgetBuilder):
    # Drop-down choice; selection tracking lives in ChoiceReference.
    widgetClass = wx.Choice
    referenceClass = ChoiceReference
class CheckBoxBuilder(WidgetBuilder):
    widgetClass = wx.CheckBox
    referenceClass = WindowReference
class GaugeBuilder(WidgetBuilder):
    widgetClass = wx.Gauge
    referenceClass = GaugeReference
    def createLocalReference(self, app, widgetData):
        """Build the gauge and apply its initial position."""
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        widget.SetValue(widgetData.otherData["value"])
        return localRef
class SliderBuilder(WidgetBuilder):
    widgetClass = wx.Slider
    referenceClass = SliderReference
class ListBoxBuilder(WidgetBuilder):
    widgetClass = wx.ListBox
    referenceClass = ListBoxReference
class ListCtrlBuilder(WidgetBuilder):
    widgetClass = wx.ListCtrl
    referenceClass = ListCtrlReference
class CheckListBoxBuilder(WidgetBuilder):
    widgetClass = wx.CheckListBox
    referenceClass = WindowReference
class SpinnerBuilder(WidgetBuilder):
    widgetClass = wx.SpinCtrl
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        """Build the spinner and apply its (min, max) range.

        The local was renamed from ``range`` to ``bounds`` -- the original
        shadowed the ``range`` builtin.
        """
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        bounds = widgetData.otherData["range"]
        widget.SetRange(bounds[0], bounds[1])
        return localRef
class RadioBoxBuilder(WidgetBuilder):
    widgetClass = wx.RadioBox
    referenceClass = WindowReference
class BoxBuilder(WidgetBuilder):
    widgetClass = wx.StaticBox
    referenceClass = WindowReference
class LineBuilder(WidgetBuilder):
    widgetClass = wx.StaticLine
    referenceClass = WindowReference
class MenuBarBuilder(WidgetBuilder):
    """Builds a wx.MenuBar plus its menus/items from serialized data."""
    widgetClass = wx.MenuBar
    referenceClass = MenuBarReference
    def replaceParent(self, app, widgetData):
        # Menu bars are attached via Frame.SetMenuBar, not parented.
        pass
    def createLocalReference(self, app, widgetData):
        """Construct the menu bar, wiring each item's EVT_MENU to a
        MenuItemReference that forwards selections to the server."""
        menuBar = self.widgetClass(**widgetData.constructorData)
        localRef = self.referenceClass(app, menuBar, widgetData.remoteWidgetReference, widgetData.eventHandlers)
        app.widgets[widgetData.remoteWidgetReference] = localRef.widget
        # The event-handler window is the same for every item: look it up
        # once instead of once per menu item (was inside the inner loop).
        form = app.widgets[widgetData.otherData["form"]]
        #create menus
        for menu in widgetData.otherData["menus"]:
            wxMenu = wx.Menu()
            for item in menu.items:
                wxMenuItem = wx.MenuItem(wxMenu, item.id, item.text, item.help)
                itemRef = MenuItemReference(localRef, wxMenuItem)
                wx.EVT_MENU(form, item.id, itemRef.onMenu)
                wxMenu.AppendItem(wxMenuItem)
            menuBar.Append(wxMenu, menu.title)
        return localRef
class ToolBarBuilder(WidgetBuilder):
    widgetClass = wx.ToolBar
    referenceClass = WindowReference
    def replaceParent(self, app, widgetData):
        """Map the remote parent straight to its wxWindow (toolbars never
        go on a contents pane)."""
        parent = widgetData.constructorData["parent"]
        if parent:
            widget = app.widgets[parent]
            widgetData.constructorData["parent"] = widget
    def createLocalReference(self, app, widgetData):
        """Build the toolbar, add each (label, image-bytes) tool, and
        attach the realized toolbar to its parent frame."""
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        for label, image in widgetData.otherData["tools"]:
            stream = cStringIO.StringIO(image)
            bitmap = BitmapFromImage(ImageFromStream(stream))
            widget.AddLabelTool(wx.ID_ANY, label, bitmap)
        frame = widgetData.constructorData["parent"]
        frame.SetToolBar(widget)
        widget.Realize()
        return localRef
class StatusBarBuilder(WidgetBuilder):
    widgetClass = wx.StatusBar
    referenceClass = StatusBarReference
    def replaceParent(self, app, widgetData):
        """Map the remote parent straight to its wxWindow."""
        parent = widgetData.constructorData["parent"]
        if parent:
            widget = app.widgets[parent]
            widgetData.constructorData["parent"] = widget
    def createLocalReference(self, app, widgetData):
        """Build the status bar, fill its fields, and attach it to the
        parent frame."""
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        fields = widgetData.otherData["fields"]
        widget.SetFieldsCount(len(fields))
        # enumerate() replaces the original manual index counter.
        for index, text in enumerate(fields):
            widget.SetStatusText(text, index)
        frame = widgetData.constructorData["parent"]
        frame.SetStatusBar(widget)
        return localRef
class ImageBuilder(WidgetBuilder):
    widgetClass = wx.Image
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        """Decode the raw image bytes into a bitmap and register it; if
        the parent is a panel, also display it as a StaticBitmap."""
        parent = app.widgets[widgetData.otherData["parent"]]
        stream = cStringIO.StringIO(widgetData.otherData["data"])
        bitmap = BitmapFromImage(ImageFromStream(stream))
        localRef = self.referenceClass(app, bitmap, widgetData.remoteWidgetReference, widgetData.eventHandlers)
        app.widgets[widgetData.remoteWidgetReference] = localRef.widget
        #if parent is a panel, display the image
        if isinstance(parent, wx.Panel):
            wx.StaticBitmap(parent, wx.ID_ANY, bitmap)
        return localRef
class SplitterBuilder(WidgetBuilder):
    widgetClass = wx.SplitterWindow
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        """Build the splitter and arrange its 0, 1, or 2 children.

        Bug fix: the original compared with ``is`` (``len(...) is 1`` and
        ``mode is "horizontal"``).  Identity comparison against literals
        is unreliable -- and since "mode" arrives over the wire it is a
        distinct string object, so the horizontal branch could never be
        taken.  Both comparisons now use ``==``.
        """
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        widget = localRef.widget
        #if no children, return
        if not localRef.children:
            pass
        #if only one child, use Initialize
        elif len(localRef.children) == 1:
            widget.Initialize(localRef.children[0].widget)
        #if two, split horizontally or vertically
        elif widgetData.otherData["mode"] == "horizontal":
            widget.SplitHorizontally(localRef.children[0].widget, localRef.children[1].widget)
        else:
            widget.SplitVertically(localRef.children[0].widget, localRef.children[1].widget)
        widget.SetMinimumPaneSize(widgetData.otherData["minimumPaneSize"])
        return localRef
class NotebookBuilder(WidgetBuilder):
    widgetClass = wx.Notebook
    referenceClass = WindowReference
    def createLocalReference(self, app, widgetData):
        """Build the notebook and add each page.

        "pages" is a list of (title, remote page reference) pairs; the
        remote reference is resolved through app.widgets.
        """
        localRef = WidgetBuilder.createLocalReference(self, app, widgetData)
        nb = localRef.widget
        pages = widgetData.otherData["pages"]
        for title, page in pages:
            pageWidget = app.widgets[page]
            nb.AddPage(pageWidget, title)
        return localRef
##################
# Widget Factory #
##################
class WidgetFactory(object):
    """A Factory that produces wxWidgets based on the class of the remote Pyrope widget passed to the constructor."""
    @classmethod
    def create(cls, app, widgetData):
        # Look the builder class up by name instead of eval()'ing
        # remote-supplied text: equivalent for valid names, and refuses
        # anything that is not a module-level *Builder class.
        try:
            builder_cls = globals()[widgetData.type + "Builder"]
        except KeyError:
            # Preserve the exception type eval() would have raised.
            raise NameError("unknown widget type: %r" % (widgetData.type,))
        builder = builder_cls()
        #if the remote widget has a parent (supplied as a pb.RemoteReference) replace the attribute with the corresponding wxWindow which is its real parent
        builder.replaceParent(app, widgetData)
        #create the local pb.Referenceable that will manage this widget
        return builder.createLocalReference(app, widgetData)
class RemoteApplicationHandler(pb.Referenceable):
    """Client-side controller for one remote application: owns the widget
    map, the pending client-side changeset, and the queue of events that
    are still being processed by the server."""
    def __init__(self, app, appPresenter):
        self.app = app
        self.appPresenter = appPresenter
        #only for wx.Frame and wx.Dialog
        self.topLevelWindows = [] #wxWidgets
        self.widgets = {} #RemoteReference (Pyrope Widget) -> wxWidget
        self.changeset = ClientChangeset() #when changes are pending, this should hold them
        self.pendingEvents = [] #list of events which are still being processed by server
    def shutdown(self):
        """Called by application when last window is closed for that application."""
        def _shutdown(result):
            self.appPresenter.shutdownApplication(self.app)
        return self.app.server.callRemote("shutdownApplication", self).addCallback(_shutdown)
    def remote_createWidget(self, widgetData):
        """Called by server when it wants to create a new widget."""
        #create widget and local proxy and return pb.RemoteReference to server
        return WidgetFactory.create(self, widgetData)
    def recordChange(self, widget, event):
        """Records change in changeset instance, and passes the event on to the next handler."""
        propertyValueTuple = widget.getChangeData(event)
        if propertyValueTuple:
            self.changeset.addChange(widget.remote, propertyValueTuple[0], propertyValueTuple[1])
        event.Skip()
    def handleEvent(self, widget, event, changeset=None):
        """This gets called when an event occurs that the server is interested in. We send the server the event data and
        also the changes that have occurred since the last change. It keeps a list of pending events, and if one is still
        being executed on the server, it adds the event to the list and returns. When an event completes, it fires off the
        next event in that list.
        When called from widgets handling events, the changeset will not be supplied, and it will be taken from this instance.
        When called from this handler after an event has fired, the changeset will be supplied."""
        def _done(result):
            #we are done with the event
            del self.pendingEvents[0]
            #if there are pending events, take the head of the list and process (like a FIFO queue)
            if self.pendingEvents:
                widget, eventData, changeset = self.pendingEvents[0]
                #handle event
                widget.remote.callRemote("handleEvent", eventData, changeset).addCallback(_done)
        #get event data for this event type
        eventData = EventFactory.create(widget.remote, event)
        #get changeset data, if it wasn't supplied to this function (i.e. from the event queue)
        if not changeset and not self.changeset.isEmpty():
            changeset = copy.copy(self.changeset)
        #if there are events still being processed, add this to chain and return
        if self.pendingEvents:
            #add to end of queue; _done will dispatch it in order
            self.pendingEvents.append((widget, eventData, changeset))
            self.changeset.clear()
            return
        #otherwise, we can process this event
        #add this event to the pending events queue
        self.pendingEvents.append((widget, eventData, changeset))
        #handle event
        widget.remote.callRemote("handleEvent", eventData, changeset).addCallback(_done)
        #clear changeset now that server knows about it
        self.changeset.clear()
class PyropeClientHandler(pb.Referenceable):
    """Placeholder for client-level remote calls (none defined yet)."""
    pass
| |
"""
slice3.py - plot 3D data on a uniform tensor-product grid as a set of
three adjustable xy, yz, and xz plots
Copyright (c) 2013 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created on Wed Dec 4 11:24:14 MST 2013
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
def meshgrid3(x, y, z):
    """Create a three-dimensional tensor-product meshgrid.

    Returns (xx, yy, zz), each of shape (len(x), len(y), len(z)), with
    xx[i, j, k] == x[i], yy[i, j, k] == y[j], zz[i, j, k] == z[k]
    (matrix/'ij' indexing).
    """
    # np.meshgrid with indexing='ij' produces exactly the result of the
    # original hand-rolled tile/reshape/swapaxes construction.
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    return xx, yy, zz
class DiscreteSlider(Slider):
    """A matplotlib slider widget with discrete steps.

    Based on Joe Kington's StackOverflow answer (Dec 1 2012):
    http://stackoverflow.com/questions/13656387/can-i-make-matplotlib-sliders-more-discrete

    NOTE(review): relies on Slider internals (poly, valtext, observers as
    a plain dict) from matplotlib of that era; newer matplotlib versions
    changed these -- confirm the target matplotlib version.
    """
    def __init__(self, *args, **kwargs):
        """Identical to Slider.__init__, except for the "increment" kwarg.
        "increment" specifies the step size that the slider will be
        discretized to."""
        self.inc = kwargs.pop('increment', 1)
        Slider.__init__(self, *args, **kwargs)
    def set_val(self, val):
        # Snap to the nearest multiple of the increment.  The original
        # stored "increment" but never used it, so observers received raw
        # float positions -- which break integer array indexing in the
        # update callbacks.
        val = int(round(val / self.inc)) * self.inc
        xy = self.poly.xy
        xy[2] = val, 1
        xy[3] = val, 0
        self.poly.xy = xy
        # Suppress slider label
        self.valtext.set_text('')
        if self.drawon:
            self.ax.figure.canvas.draw()
        self.val = val
        if not self.eventson:
            return
        # .items() works on both Python 2 and 3; the original
        # .iteritems() is Python-2-only and crashes under Python 3.
        for cid, func in self.observers.items():
            func(val)
class slice3(object):
    """Interactive viewer: three orthogonal slices of a 3-D array *u*
    sampled on the tensor-product grid (xx, yy, zz), each slice driven by
    its own discrete slider, with crosshair lines in each panel marking
    where the other two slices cut."""
    def __init__(self,xx,yy,zz,u):
        # Recover the 1-D axes from the 'ij'-indexed meshgrid arrays.
        self.x = xx[:,0,0]
        self.y = yy[0,:,0]
        self.z = zz[0,0,:]
        self.data = u
        self.fig = plt.figure(1,(20,7))
        # ax1: x-slice (z horizontal, y vertical); ax2: y-slice; ax3: z-slice.
        self.ax1 = self.fig.add_subplot(131,aspect='equal')
        self.ax2 = self.fig.add_subplot(132,aspect='equal')
        self.ax3 = self.fig.add_subplot(133,aspect='equal')
        # Crosshair lines, initialized at the first grid point.
        self.xplot_zline = self.ax1.axvline(color='m',linestyle='--',lw=2)
        self.xplot_zline.set_xdata(self.z[0])
        self.xplot_yline = self.ax1.axhline(color='m',linestyle='--',lw=2)
        self.xplot_yline.set_ydata(self.y[0])
        self.yplot_xline = self.ax2.axhline(color='m',linestyle='--',lw=2)
        self.yplot_xline.set_ydata(self.x[0])
        self.yplot_zline = self.ax2.axvline(color='m',linestyle='--',lw=2)
        self.yplot_zline.set_xdata(self.z[0])
        self.zplot_xline = self.ax3.axvline(color='m',linestyle='--',lw=2)
        self.zplot_xline.set_xdata(self.x[0])
        self.zplot_yline = self.ax3.axhline(color='m',linestyle='--',lw=2)
        self.zplot_yline.set_ydata(self.y[0])
        # Initial slices at index 0 along each axis.
        self.xslice = self.ax1.imshow(u[0,:,:],extent=(self.z[0],self.z[-1],self.y[0],self.y[-1]))
        self.yslice = self.ax2.imshow(u[:,0,:],extent=(self.z[0],self.z[-1],self.x[0],self.x[-1]))
        self.zslice = self.ax3.imshow(u[:,:,0],extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]))
        # Create and initialize x-slider
        self.sliderax1 = self.fig.add_axes([0.125,0.08,0.225,0.03])
        self.sliderx = DiscreteSlider(self.sliderax1,'',0,len(self.x)-1,increment=1,valinit=0)
        self.sliderx.on_changed(self.update_x)
        self.sliderx.set_val(0)
        # Create and initialize y-slider
        self.sliderax2 = self.fig.add_axes([0.4,0.08,0.225,0.03])
        self.slidery = DiscreteSlider(self.sliderax2,'',0,len(self.y)-1,increment=1,valinit=0)
        self.slidery.on_changed(self.update_y)
        self.slidery.set_val(0)
        # Create and initialize z-slider
        self.sliderax3 = self.fig.add_axes([0.675,0.08,0.225,0.03])
        self.sliderz = DiscreteSlider(self.sliderax3,'',0,len(self.z)-1,increment=1,valinit=0)
        self.sliderz.on_changed(self.update_z)
        self.sliderz.set_val(0)
        # Match each panel's aspect ratio to its data extents.
        z0,z1 = self.ax1.get_xlim()
        x0,x1 = self.ax2.get_ylim()
        y0,y1 = self.ax1.get_ylim()
        self.ax1.set_aspect((z1-z0)/(y1-y0))
        self.ax2.set_aspect((z1-z0)/(x1-x0))
        self.ax3.set_aspect((x1-x0)/(y1-y0))
    def xlabel(self,*args,**kwargs):
        # x appears on ax2's vertical axis and ax3's horizontal axis.
        self.ax2.set_ylabel(*args,**kwargs)
        self.ax3.set_xlabel(*args,**kwargs)
    def ylabel(self,*args,**kwargs):
        self.ax1.set_ylabel(*args,**kwargs)
        self.ax3.set_ylabel(*args,**kwargs)
    def zlabel(self,*args,**kwargs):
        self.ax1.set_xlabel(*args,**kwargs)
        self.ax2.set_xlabel(*args,**kwargs)
    def update_x(self,value):
        # Slider callback; *value* is an integer grid index.
        self.xslice.set_data(self.data[value,:,:])
        self.yplot_xline.set_ydata(self.x[value])
        self.zplot_xline.set_xdata(self.x[value])
    def update_y(self,value):
        self.yslice.set_data(self.data[:,value,:])
        self.xplot_yline.set_ydata(self.y[value])
        self.zplot_yline.set_ydata(self.y[value])
    def update_z(self,value):
        self.zslice.set_data(self.data[:,:,value])
        self.xplot_zline.set_xdata(self.z[value])
        self.yplot_zline.set_xdata(self.z[value])
    def show(self):
        plt.show()
if __name__ == '__main__':
    # Number of x-grid points
    nx = 100
    # Number of y-grid points
    ny = 100
    # Number of z-grid points
    nz = 200
    x = np.linspace(-4,4,nx)
    y = np.linspace(-4,4,ny)
    z = np.linspace(0,8,nz)
    xx,yy,zz = meshgrid3(x,y,z)
    # Display three cross sections of a Gaussian Beam/Paraxial wave
    u = np.real(np.exp(-(2*xx**2+yy**2)/(.2+2j*zz))/np.sqrt(.2+2j*zz))
    s3 = slice3(xx,yy,zz,u)
    s3.xlabel('x',fontsize=18)
    s3.ylabel('y',fontsize=18)
    s3.zlabel('z',fontsize=18)
    s3.show()
| |
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from datetime import datetime
from dateutil.tz import tzutc
import hashlib
import jmespath
import json
from .core import BaseAction
from c7n.utils import (
type_schema, local_session, chunks, dumps, filter_empty, get_partition)
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources as aws_resources
from c7n.version import version
# Valid top-level namespaces for a finding "types" entry
# ("namespace/category/classifier"); checked in PostFinding.validate.
FindingTypes = {
    "Software and Configuration Checks",
    "TTPs",
    "Effects",
    "Unusual Behaviors",
    "Sensitive Data Identifications"
}
# Mostly undocumented value size limit
SECHUB_VALUE_SIZE_LIMIT = 1024
class PostFinding(BaseAction):
    """Report a finding to AWS Security Hub.

    Custodian acts as a finding provider, allowing users to craft
    policies that report to the AWS SecurityHub.

    For resources that are taggable, we will tag the resource with an identifier
    such that further findings generate updates.

    Example generate a finding for accounts that don't have shield enabled.

    :example:

    .. code-block:: yaml

      policies:
       - name: account-shield-enabled
         resource: account
         filters:
           - shield-enabled
         actions:
           - type: post-finding
             description: |
                Shield should be enabled on account to allow for DDOS protection (1 time 3k USD Charge).
             severity_normalized: 6
             types:
               - "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)"
             recommendation: "Enable shield"
             recommendation_url: "https://www.example.com/policies/AntiDDoS.html"
             confidence: 100
             compliance_status: FAILED
    """ # NOQA

    # ASFF schema version emitted in each finding.
    FindingVersion = "2018-10-08"
    ProductName = "default"
    permissions = ('securityhub:BatchImportFindings',)
    schema_alias = True
    schema = type_schema(
        "post-finding",
        required=["types"],
        title={"type": "string"},
        description={'type': 'string'},
        severity={"type": "number", 'default': 0},
        severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0},
        confidence={"type": "number", "min": 0, "max": 100},
        criticality={"type": "number", "min": 0, "max": 100},
        # Cross region aggregation
        region={'type': 'string', 'description': 'cross-region aggregation target'},
        recommendation={"type": "string"},
        recommendation_url={"type": "string"},
        fields={"type": "object"},
        batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},
        types={
            "type": "array",
            "minItems": 1,
            "items": {"type": "string"},
        },
        compliance_status={
            "type": "string",
            "enum": ["PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"],
        },
    )
    # Sentinel grouping key for resources that carry no finding tag yet.
    NEW_FINDING = 'New'

    def validate(self):
        """Reject finding types that are not 'namespace/category/classifier'
        with a namespace from FindingTypes."""
        for finding_type in self.data["types"]:
            if finding_type.count('/') > 2 or finding_type.split('/')[0] not in FindingTypes:
                raise PolicyValidationError(
                    "Finding types must be in the format 'namespace/category/classifier'."
                    " Found {}. Valid namespace values are: {}.".format(
                        finding_type, " | ".join([ns for ns in FindingTypes])))

    def get_finding_tag(self, resource):
        """Return the 'finding_id:created_at' value previously tagged onto
        *resource* for this policy/title, or None if never reported."""
        finding_tag = None
        tags = resource.get('Tags', [])
        finding_key = '{}:{}'.format('c7n:FindingId',
            self.data.get('title', self.manager.ctx.policy.name))
        # Support Tags as dictionary
        if isinstance(tags, dict):
            return tags.get(finding_key)
        # Support Tags as list of {'Key': 'Value'}
        for t in tags:
            key = t['Key']
            value = t['Value']
            if key == finding_key:
                finding_tag = value
        return finding_tag

    def group_resources(self, resources):
        """Bucket resources by their existing finding tag (NEW_FINDING for
        resources that have never been reported)."""
        grouped_resources = {}
        for r in resources:
            finding_tag = self.get_finding_tag(r) or self.NEW_FINDING
            grouped_resources.setdefault(finding_tag, []).append(r)
        return grouped_resources

    def process(self, resources, event=None):
        """Import findings in batches; tag newly-reported resources with
        their finding id so later runs update instead of duplicating."""
        region_name = self.data.get('region', self.manager.config.region)
        client = local_session(
            self.manager.session_factory).client(
                "securityhub", region_name=region_name)
        now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
        # default batch size to one to work around security hub console issue
        # which only shows a single resource in a finding.
        batch_size = self.data.get('batch_size', 1)
        stats = Counter()
        for key, grouped_resources in self.group_resources(resources).items():
            for resource_set in chunks(grouped_resources, batch_size):
                stats['Finding'] += 1
                if key == self.NEW_FINDING:
                    finding_id = None
                    created_at = now
                    updated_at = now
                else:
                    # Tag value format is "finding_id:created_at".
                    finding_id, created_at = self.get_finding_tag(
                        resource_set[0]).split(':', 1)
                    updated_at = now
                finding = self.get_finding(
                    resource_set, finding_id, created_at, updated_at)
                import_response = client.batch_import_findings(
                    Findings=[finding])
                if import_response['FailedCount'] > 0:
                    stats['Failed'] += import_response['FailedCount']
                    self.log.error(
                        "import_response=%s" % (import_response))
                if key == self.NEW_FINDING:
                    stats['New'] += len(resource_set)
                    # Tag resources with new finding ids
                    tag_action = self.manager.action_registry.get('tag')
                    if tag_action is None:
                        continue
                    tag_action({
                        'key': '{}:{}'.format(
                            'c7n:FindingId',
                            self.data.get(
                                'title', self.manager.ctx.policy.name)),
                        'value': '{}:{}'.format(
                            finding['Id'], created_at)},
                        self.manager).process(resource_set)
                else:
                    stats['Update'] += len(resource_set)
        self.log.debug(
            "policy:%s securityhub %d findings resources %d new %d updated %d failed",
            self.manager.ctx.policy.name,
            stats['Finding'],
            stats['New'],
            stats['Update'],
            stats['Failed'])

    def get_finding(self, resources, existing_finding_id, created_at, updated_at):
        """Assemble the ASFF finding document for *resources*.

        Reuses *existing_finding_id* when updating; otherwise derives a
        deterministic id from region/account/policy/resource-ids.
        """
        policy = self.manager.ctx.policy
        model = self.manager.resource_type
        region = self.data.get('region', self.manager.config.region)

        if existing_finding_id:
            finding_id = existing_finding_id
        else:
            finding_id = '{}/{}/{}/{}'.format(
                self.manager.config.region,
                self.manager.config.account_id,
                hashlib.md5(json.dumps(
                    policy.data).encode('utf8')).hexdigest(),
                hashlib.md5(json.dumps(list(sorted(
                    [r[model.id] for r in resources]))).encode(
                        'utf8')).hexdigest())
        finding = {
            "SchemaVersion": self.FindingVersion,
            "ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format(
                region,
                self.manager.config.account_id,
                self.manager.config.account_id,
                self.ProductName,
            ),
            "AwsAccountId": self.manager.config.account_id,
            # Long search chain for description values, as this was
            # made required long after users had policies deployed, so
            # use explicit description, or policy description, or
            # explicit title, or policy name, in that order.
            "Description": self.data.get(
                "description", policy.data.get(
                    "description",
                    self.data.get('title', policy.name))).strip(),
            "Title": self.data.get("title", policy.name),
            'Id': finding_id,
            "GeneratorId": policy.name,
            'CreatedAt': created_at,
            'UpdatedAt': updated_at,
            "RecordState": "ACTIVE",
        }

        severity = {'Product': 0, 'Normalized': 0}
        if self.data.get("severity") is not None:
            severity["Product"] = self.data["severity"]
        if self.data.get("severity_normalized") is not None:
            severity["Normalized"] = self.data["severity_normalized"]
        # NOTE(review): severity is initialized non-empty, so this branch
        # is always taken -- a Severity block is always emitted.
        if severity:
            finding["Severity"] = severity

        recommendation = {}
        if self.data.get("recommendation"):
            recommendation["Text"] = self.data["recommendation"]
        if self.data.get("recommendation_url"):
            recommendation["Url"] = self.data["recommendation_url"]
        if recommendation:
            finding["Remediation"] = {"Recommendation": recommendation}

        if "confidence" in self.data:
            finding["Confidence"] = self.data["confidence"]
        if "criticality" in self.data:
            finding["Criticality"] = self.data["criticality"]
        if "compliance_status" in self.data:
            finding["Compliance"] = {"Status": self.data["compliance_status"]}

        fields = {
            'resource': policy.resource_type,
            'ProviderName': 'CloudCustodian',
            'ProviderVersion': version
        }

        if "fields" in self.data:
            fields.update(self.data["fields"])
        else:
            # Fall back to the policy's own tags as product fields.
            tags = {}
            for t in policy.tags:
                if ":" in t:
                    k, v = t.split(":", 1)
                else:
                    k, v = t, ""
                tags[k] = v
            fields.update(tags)
        if fields:
            finding["ProductFields"] = fields

        finding_resources = []
        for r in resources:
            finding_resources.append(self.format_resource(r))
        finding["Resources"] = finding_resources
        finding["Types"] = list(self.data["types"])

        return filter_empty(finding)

    def format_resource(self, r):
        """Serialize *r* into an ASFF Resource entry (subclass hook)."""
        raise NotImplementedError("subclass responsibility")
class OtherResourcePostFinding(PostFinding):
    """Generic post-finding action: serializes any ARN-bearing resource as
    an ASFF 'Other' resource entry."""
    # Optional (expr, key) extractors applied on top of the scalar fields.
    fields = ()

    def format_resource(self, r):
        """Flatten scalar attributes of *r* (plus configured jmespath
        fields) into an ASFF Resource of Type 'Other'."""
        details = {}
        for k in r:
            if isinstance(k, (list, dict)):
                continue
            details[k] = r[k]

        for f in self.fields:
            value = jmespath.search(f['expr'], r)
            if not value:
                continue
            details[f['key']] = value

        # Stringify values and clamp to the (mostly undocumented)
        # Security Hub per-value size limit.
        for k, v in details.items():
            if isinstance(v, datetime):
                v = v.isoformat()
            elif isinstance(v, (list, dict)):
                v = dumps(v)
            elif isinstance(v, (int, float, bool)):
                v = str(v)
            else:
                continue
            details[k] = v[:SECHUB_VALUE_SIZE_LIMIT]

        details['c7n:resource-type'] = self.manager.type
        other = {
            'Type': 'Other',
            'Id': self.manager.get_arns([r])[0],
            'Region': self.manager.config.region,
            'Partition': get_partition(self.manager.config.region),
            'Details': {'Other': filter_empty(details)}
        }
        tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
        if tags:
            other['Tags'] = tags
        return other

    @classmethod
    def register_resource(klass, registry, event):
        """Registry hook: install 'post-finding' on every ARN-bearing
        resource type that doesn't already define one."""
        for rtype, resource_manager in registry.items():
            if not resource_manager.has_arn():
                continue
            if 'post-finding' in resource_manager.action_registry:
                continue
            resource_manager.action_registry.register('post-finding', klass)
# Register the generic post-finding action once all resource types have
# been loaded.
aws_resources.subscribe(
    aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource)
| |
from sdf import *
import os
def generate(f, name, samples=2**26, **kwargs):
    """Render SDF *f* to models/<name>.stl and images/<name>.png.

    Skips all work when the PNG already exists; extra kwargs are passed
    through to f.save().
    """
    for directory in ('models', 'images'):
        os.makedirs(directory, exist_ok=True)
    stl_path = 'models/%s.stl' % name
    png_path = 'images/%s.png' % name
    if os.path.exists(png_path):
        return
    f.save(stl_path, samples=samples, **kwargs)
    # External renderer converts the mesh into a preview image.
    os.system('./render %s %s' % (stl_path, png_path))
# ---------------------------------------------------------------------
# Gallery: each section demonstrates one sdf primitive or operation
# (the signature being shown is in the comment above each example) and
# renders it via generate().
# ---------------------------------------------------------------------
# example
f = sphere(1) & box(1.5)
c = cylinder(0.5)
f -= c.orient(X) | c.orient(Y) | c.orient(Z)
# Kept for reuse by the dilate/erode/elongate/slice examples below.
example = f
generate(f, 'example')
# sphere(radius=1, center=ORIGIN)
f = sphere(1)
generate(f, 'sphere')
# box(size=1, center=ORIGIN, a=None, b=None)
f = box(1)
generate(f, 'box')
f = box((1, 2, 3))
generate(f, 'box2')
# rounded_box(size, radius)
f = rounded_box((1, 2, 3), 0.25)
generate(f, 'rounded_box')
# wireframe_box(size, thickness)
f = wireframe_box((1, 2, 3), 0.05)
generate(f, 'wireframe_box')
# torus(r1, r2)
f = torus(1, 0.25)
generate(f, 'torus')
# capsule(a, b, radius)
f = capsule(-Z, Z, 0.5)
generate(f, 'capsule')
# capped_cylinder(a, b, radius)
f = capped_cylinder(-Z, Z, 0.5)
generate(f, 'capped_cylinder')
# rounded_cylinder(ra, rb, h)
f = rounded_cylinder(0.5, 0.1, 2)
generate(f, 'rounded_cylinder')
# capped_cone(a, b, ra, rb)
f = capped_cone(-Z, Z, 1, 0.5)
generate(f, 'capped_cone')
# rounded_cone(r1, r2, h)
f = rounded_cone(0.75, 0.25, 2)
generate(f, 'rounded_cone')
# ellipsoid(size)
f = ellipsoid((1, 2, 3))
generate(f, 'ellipsoid')
# pyramid(h)
f = pyramid(1)
generate(f, 'pyramid')
# tetrahedron(r)
f = tetrahedron(1)
generate(f, 'tetrahedron')
# octahedron(r)
f = octahedron(1)
generate(f, 'octahedron')
# dodecahedron(r)
f = dodecahedron(1)
generate(f, 'dodecahedron')
# icosahedron(r)
f = icosahedron(1)
generate(f, 'icosahedron')
# plane(normal=UP, point=ORIGIN)
f = sphere() & plane()
generate(f, 'plane')
# slab(x0=None, y0=None, z0=None, x1=None, y1=None, z1=None, k=None)
f = sphere() & slab(z0=-0.5, z1=0.5, x0=0)
generate(f, 'slab')
# cylinder(radius)
f = sphere() - cylinder(0.5)
generate(f, 'cylinder')
# translate(other, offset)
f = sphere().translate((0, 0, 2))
generate(f, 'translate')
# scale(other, factor)
f = sphere().scale((1, 2, 3))
generate(f, 'scale')
# rotate(other, angle, vector=Z)
# rotate_to(other, a, b)
f = capped_cylinder(-Z, Z, 0.5).rotate(pi / 4, X)
generate(f, 'rotate')
# orient(other, axis)
c = capped_cylinder(-Z, Z, 0.25)
f = c.orient(X) | c.orient(Y) | c.orient(Z)
generate(f, 'orient')
# boolean operations
a = box((3, 3, 0.5))
b = sphere()
# union
f = a | b
generate(f, 'union')
# difference
f = a - b
generate(f, 'difference')
# intersection
f = a & b
generate(f, 'intersection')
# smooth union (k() sets the smoothing radius)
f = a | b.k(0.25)
generate(f, 'smooth_union')
# smooth difference
f = a - b.k(0.25)
generate(f, 'smooth_difference')
# smooth intersection
f = a & b.k(0.25)
generate(f, 'smooth_intersection')
# repeat(other, spacing, count=None, padding=0)
f = sphere().repeat(3, (1, 1, 0))
generate(f, 'repeat')
# circular_array(other, count, offset)
f = capped_cylinder(-Z, Z, 0.5).circular_array(8, 4)
generate(f, 'circular_array')
# blend(a, *bs, k=0.5)
f = sphere().blend(box())
generate(f, 'blend')
# dilate(other, r)
f = example.dilate(0.1)
generate(f, 'dilate')
# erode(other, r)
f = example.erode(0.1)
generate(f, 'erode')
# shell(other, thickness)
f = sphere().shell(0.05) & plane(-Z)
generate(f, 'shell')
# elongate(other, size)
f = example.elongate((0.25, 0.5, 0.75))
generate(f, 'elongate')
# twist(other, k)
f = box().twist(pi / 2)
generate(f, 'twist')
# bend(other, k)
f = box().bend(1)
generate(f, 'bend')
# bend_linear(other, p0, p1, v, e=ease.linear)
f = capsule(-Z * 2, Z * 2, 0.25).bend_linear(-Z, Z, X, ease.in_out_quad)
generate(f, 'bend_linear')
# bend_radial(other, r0, r1, dz, e=ease.linear)
f = box((5, 5, 0.25)).bend_radial(1, 2, -1, ease.in_out_quad)
generate(f, 'bend_radial', sparse=False)
# transition_linear(f0, f1, p0=-Z, p1=Z, e=ease.linear)
f = box().transition_linear(sphere(), e=ease.in_out_quad)
generate(f, 'transition_linear')
# transition_radial(f0, f1, r0=0, r1=1, e=ease.linear)
f = box().transition_radial(sphere(), e=ease.in_out_quad)
generate(f, 'transition_radial')
# extrude(other, h)
f = hexagon(1).extrude(1)
generate(f, 'extrude')
# extrude_to(a, b, h, e=ease.linear)
f = rectangle(2).extrude_to(circle(1), 2, ease.in_out_quad)
generate(f, 'extrude_to')
# revolve(other, offset=0)
f = hexagon(1).revolve(3)
generate(f, 'revolve')
# slice(other)
f = example.translate((0, 0, 0.55)).slice().extrude(0.1)
generate(f, 'slice')
# text(name, text, width=None, height=None, texture_point_size=512)
f = rounded_box((7, 2, 0.2), 0.1)
f -= text('Georgia', 'Hello, World!').extrude(0.2).rotate(pi).translate(0.1 * Z)
generate(f, 'text')
# wrap_around(other, x0, x1, r=None, e=ease.linear)
FONT = 'Arial'
TEXT = ' wrap_around ' * 3
w, h = measure_text(FONT, TEXT)
f = text(FONT, TEXT).extrude(0.1).orient(Y).wrap_around(-w / 2, w / 2)
generate(f, 'wrap_around')
| |
from django.core.exceptions import PermissionDenied
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from geokey.core.decorators import handle_exceptions_for_ajax
from geokey.users.models import User
from geokey.projects.models import Project
from ..renderer.geojson import GeoJsonRenderer
from ..parsers.geojson import GeoJsonParser
from .base import SingleAllContribution
from ..serializers import ContributionSerializer
class GeoJsonView(APIView):
    """
    Base API view that speaks GeoJSON: responses are rendered with
    GeoJsonRenderer and request bodies are parsed with GeoJsonParser.
    Concrete contribution endpoints below inherit from this class.
    """
    renderer_classes = (GeoJsonRenderer,)
    parser_classes = (GeoJsonParser,)
class ProjectObservations(GeoJsonView):
    """
    Public API endpoint to add new contributions to a project

    /api/projects/:project_id/contributions
    """
    @handle_exceptions_for_ajax
    def post(self, request, project_id):
        """
        Adds a new contribution to a project

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        project_id : int
            identifies the project in the data base

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised contribution
        """
        user = request.user
        # Anonymous contributions are stored against a well-known
        # placeholder account.
        if user.is_anonymous():
            user = User.objects.get(display_name='AnonymousUser')

        data = request.DATA
        project = Project.objects.as_contributor(request.user, project_id)

        # Moderators' contributions go live immediately unless explicitly
        # submitted as drafts.  BUGFIX: guard against payloads without a
        # 'meta' object — the original `data.get('meta').get('status')`
        # raised AttributeError on None; SingleContributionAPIView.
        # update_and_respond() applies the same None check.
        meta = data.get('meta')
        if (meta is not None and
                not meta.get('status') == 'draft' and
                project.can_moderate(user)):
            data['meta']['status'] = 'active'

        serializer = ContributionSerializer(
            data=data, context={'user': user, 'project': project}
        )
        if serializer.is_valid(raise_exception=True):
            serializer.save()

        return Response(serializer.data, status=status.HTTP_201_CREATED)

    @handle_exceptions_for_ajax
    def get(self, request, project_id):
        """
        Returns a list of contributions for a project

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        project_id : int
            identifies the project in the data base

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised observations
        """
        q = request.GET.get('search')
        s = request.GET.get('subset')
        project = Project.objects.get_single(request.user, project_id)
        contributions = project.get_all_contributions(
            request.user,
            search=q,
            subset=s
        )

        serializer = ContributionSerializer(
            contributions,
            many=True,
            context={'user': request.user, 'project': project, 'search': q}
        )

        return Response(serializer.data, status=status.HTTP_200_OK)
# ############################################################################
#
# SINGLE CONTRIBUTIONS
#
# ############################################################################
class SingleContributionAPIView(GeoJsonView):
    """
    Abstract APIView for handling requests to single observations
    """
    def get_and_respond(self, request, observation):
        """
        Returns a single contribution

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        observation : geokey.contributions.models.Observation
            Observation to be returned

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised observation
        """
        serializer = ContributionSerializer(
            observation,
            context={'user': request.user, 'project': observation.project}
        )
        return Response(serializer.data, status=status.HTTP_200_OK)

    def update_and_respond(self, request, observation):
        """
        Updates and returns a single contribution

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        observation : geokey.contributions.models.Observation
            Observation to be updated

        Returns
        -------
        rest_framework.response.Response
            Contains the updated serialised observation

        Raises
        ------
        PermissionDenied
            When the user may neither edit the contribution nor perform
            the requested status transition.
        """
        data = request.DATA
        user = request.user
        if user.is_anonymous():
            user = User.objects.get(display_name='AnonymousUser')

        new_status = None
        if data.get('meta') is not None:
            new_status = data.get('meta').get('status')

        user_can_moderate = observation.project.can_moderate(user)
        user_is_owner = (observation.creator == user)
        under_review = observation.comments.filter(
            review_status='open').exists()

        # Status transitions are restricted: owners may withdraw to
        # 'pending'; publishing to 'active' is allowed for owners of drafts
        # and for moderators of pending contributions.
        if (new_status is not None and new_status != observation.status):
            if not (
                (new_status == 'pending' and
                 (user_is_owner or user_can_moderate)) or
                (new_status == 'active' and
                 observation.status == 'draft' and user_is_owner) or
                (new_status == 'active' and
                 observation.status == 'pending' and user_can_moderate)):

                raise PermissionDenied('You are not allowed to update the '
                                       'status of the contribution from "%s" '
                                       'to "%s"' % (
                                           observation.status,
                                           new_status
                                       ))

        elif not (user_is_owner or user_can_moderate):
            # BUGFIX: the two literals previously concatenated to
            # "...update thecontribution" (missing space).
            raise PermissionDenied('You are not allowed to update the '
                                   'contribution')

        # A contribution with open review comments cannot go fully active.
        if new_status == 'active' and under_review:
            data['meta']['status'] = 'review'

        # Non-moderators publishing a draft fall back to the category's
        # default status instead of going straight to 'active'.
        if ((new_status == 'active' and observation.status == 'draft') and
                not user_can_moderate):
            default_status = observation.category.default_status
            data['meta']['status'] = default_status

        serializer = ContributionSerializer(
            observation,
            data=data,
            context={'user': user, 'project': observation.project}
        )
        if serializer.is_valid(raise_exception=True):
            serializer.save()

        return Response(serializer.data, status=status.HTTP_200_OK)

    def delete_and_respond(self, request, observation):
        """
        Deletes a single observation

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        observation : geokey.contributions.models.Observation
            Observation to be deleted

        Returns
        -------
        rest_framework.response.Response
            Empty response indicating successful delete

        Raises
        ------
        PermissionDenied
            When the user is neither the creator nor a moderator.
        """
        if (observation.creator == request.user or
                observation.project.can_moderate(request.user)):
            observation.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)

        # BUGFIX: the two literals previously concatenated to
        # "...delete thiscontribution" (missing space).
        raise PermissionDenied('You are not allowed to delete this '
                               'contribution')
class SingleAllContributionAPIView(
        SingleAllContribution, SingleContributionAPIView):
    """
    Public API endpoint for a single observation in a project.

    /api/projects/:project_id/observations/:observation_id

    Each handler resolves the observation via get_object() and delegates
    the actual work to the *_and_respond helpers on the parent class.
    """
    @handle_exceptions_for_ajax
    def get(self, request, project_id, observation_id):
        """
        Returns a single contribution

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        project_id : int
            identifies the project in the data base
        observation_id : int
            identifies the observation in the data base

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised observation
        """
        contribution = self.get_object(
            request.user, project_id, observation_id)
        return self.get_and_respond(request, contribution)

    @handle_exceptions_for_ajax
    def patch(self, request, project_id, observation_id):
        """
        Updates and returns a single contribution

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        project_id : int
            identifies the project in the data base
        observation_id : int
            identifies the observation in the data base

        Returns
        -------
        rest_framework.response.Response
            Contains the updated serialised observation
        """
        contribution = self.get_object(
            request.user, project_id, observation_id)
        return self.update_and_respond(request, contribution)

    @handle_exceptions_for_ajax
    def delete(self, request, project_id, observation_id):
        """
        Deletes a single contribution

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request
        project_id : int
            identifies the project in the data base
        observation_id : int
            identifies the observation in the data base

        Returns
        -------
        rest_framework.response.Response
            Empty response indicating successful delete
        """
        contribution = self.get_object(
            request.user, project_id, observation_id)
        return self.delete_and_respond(request, contribution)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Module documentation rendered by ansible-doc; the body must stay valid YAML.
# BUGFIX: "creating ore updating" -> "creating or updating".
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
  - Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
  name:
    required: false
    default: null
    description:
      - The name associated with resource
  filename:
    required: false
    default: null
    description:
      - The path and filename of the resource(s) definition file.
  kubectl:
    required: false
    default: null
    description:
      - The path to the kubectl bin
  namespace:
    required: false
    default: null
    description:
      - The namespace associated with the resource(s)
  resource:
    required: false
    default: null
    description:
      - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
  label:
    required: false
    default: null
    description:
      - The labels used to filter specific resources.
  server:
    required: false
    default: null
    description:
      - The url for the API server that commands are executed against.
  force:
    required: false
    default: false
    description:
      - A flag to indicate to force delete, replace, or stop.
  all:
    required: false
    default: false
    description:
      - A flag to indicate delete all, stop all, or all namespaces when checking exists.
  log_level:
    required: false
    default: 0
    description:
      - Indicates the level of verbosity of logging by kubectl.
  state:
    required: false
    choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
    default: present
    description:
      - present handles checking existence or creating if definition file provided,
        absent handles deleting resource(s) based on other options,
        latest handles creating or updating based on existence,
        reloaded handles updating resource(s) definition using definition file,
        stopped handles stopping resource(s) based on other options.
requirements:
  - kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
"""
class KubeManager(object):
    """Thin wrapper around the kubectl CLI.

    Builds a base command line from the module parameters (binary path,
    --server/--v/--namespace) and exposes one method per kubectl action.
    """

    def __init__(self, module):
        self.module = module

        self.kubectl = module.params.get('kubectl')
        if self.kubectl is None:
            # Fall back to whatever kubectl is on PATH; fail if missing.
            self.kubectl = module.get_bin_path('kubectl', True)
        self.base_cmd = [self.kubectl]

        if module.params.get('server'):
            self.base_cmd.append('--server=' + module.params.get('server'))

        if module.params.get('log_level'):
            self.base_cmd.append('--v=' + str(module.params.get('log_level')))

        if module.params.get('namespace'):
            self.base_cmd.append('--namespace=' + module.params.get('namespace'))

        self.all = module.params.get('all')
        self.force = module.params.get('force')
        self.name = module.params.get('name')
        self.filename = module.params.get('filename')
        self.resource = module.params.get('resource')
        self.label = module.params.get('label')

    def _execute(self, cmd):
        """Run kubectl with *cmd* appended; fail the module on any error.

        Returns the command's stdout split into lines.
        """
        args = self.base_cmd + cmd
        try:
            rc, out, err = self.module.run_command(args)
            if rc != 0:
                self.module.fail_json(
                    msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
        except Exception as exc:
            self.module.fail_json(
                msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
        return out.splitlines()

    def _execute_nofail(self, cmd):
        """Run kubectl; return None instead of failing when it errors."""
        args = self.base_cmd + cmd
        rc, out, err = self.module.run_command(args)
        if rc != 0:
            return None
        return out.splitlines()

    def _target_args(self, verb):
        """Build the argument tail shared by delete() and stop().

        Either targets a definition file, or a resource (optionally
        narrowed by name/label, widened by --all) with --ignore-not-found
        when forcing.  *verb* is only used in the failure message.
        """
        cmd = []
        if self.filename:
            cmd.append('--filename=' + self.filename)
        else:
            if not self.resource:
                self.module.fail_json(
                    msg='resource required to %s without filename' % verb)

            cmd.append(self.resource)

            if self.name:
                cmd.append(self.name)

            if self.label:
                cmd.append('--selector=' + self.label)

            if self.all:
                cmd.append('--all')

            if self.force:
                cmd.append('--ignore-not-found')
        return cmd

    def create(self, check=True):
        """kubectl create from the definition file (no-op if it exists)."""
        if check and self.exists():
            return []

        cmd = ['create']

        if not self.filename:
            self.module.fail_json(msg='filename required to create')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def replace(self):
        """kubectl replace from the definition file (--force when forcing)."""
        if not self.force and not self.exists():
            return []

        cmd = ['replace']

        if self.force:
            cmd.append('--force')

        if not self.filename:
            self.module.fail_json(msg='filename required to reload')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def delete(self):
        """kubectl delete the targeted resource(s)."""
        if not self.force and not self.exists():
            return []

        return self._execute(['delete'] + self._target_args('delete'))

    def exists(self):
        """Return True when `kubectl get` finds the targeted resource(s)."""
        cmd = ['get']

        if not self.resource:
            return False

        cmd.append(self.resource)

        if self.name:
            cmd.append(self.name)

        cmd.append('--no-headers')

        if self.label:
            cmd.append('--selector=' + self.label)

        if self.all:
            cmd.append('--all-namespaces')

        result = self._execute_nofail(cmd)
        if not result:
            return False

        return True

    def stop(self):
        """kubectl stop the targeted resource(s)."""
        if not self.force and not self.exists():
            return []

        return self._execute(['stop'] + self._target_args('stop'))
def main():
    """Entry point: map the requested state onto a KubeManager action."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(),
            filename=dict(),
            namespace=dict(),
            resource=dict(),
            label=dict(),
            server=dict(),
            kubectl=dict(),
            force=dict(default=False, type='bool'),
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
        )
    )

    manager = KubeManager(module)
    state = module.params.get('state')

    if state == 'latest':
        # "latest" means replace when the resource already exists,
        # create it otherwise.
        if manager.exists():
            manager.force = True
            result = manager.replace()
        else:
            result = manager.create(check=False)
    else:
        dispatch = {
            'present': manager.create,
            'absent': manager.delete,
            'reloaded': manager.replace,
            'stopped': manager.stop,
        }
        action = dispatch.get(state)
        if action is None:
            module.fail_json(msg='Unrecognized state %s.' % state)
        result = action()

    module.exit_json(changed=bool(result),
                     msg='success: %s' % (' '.join(result))
                     )
# Ansible modules conventionally star-import module_utils at the bottom of
# the file (it must come after DOCUMENTATION/EXAMPLES for the old module
# scanner); keep the noqa so linters don't flag the star import.
from ansible.module_utils.basic import * # noqa

if __name__ == '__main__':
    main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils import units
from nova.compute import arch
from nova.compute import claims
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
from nova.pci import manager as pci_manager
from nova import test
# Canned return value for the fake virt driver's get_available_resource():
# a 4-vCPU, 512 MB RAM, 6 GB disk host with nothing in use yet.
_VIRT_DRIVER_AVAIL_RESOURCES = {
    'vcpus': 4,
    'memory_mb': 512,
    'local_gb': 6,
    'vcpus_used': 0,
    'memory_mb_used': 0,
    'local_gb_used': 0,
    'hypervisor_type': 'fake',
    'hypervisor_version': 0,
    'hypervisor_hostname': 'fakehost',
    'cpu_info': '',
    'numa_topology': None,
    }

# ComputeNode DB-record fixture mirroring the driver resources above;
# the free_* fields are derived from the same values so the two stay
# consistent.
_COMPUTE_NODE_FIXTURES = [
    objects.ComputeNode(
        id=1,
        host='fake-host',
        vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
        memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
        local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
        vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
        memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
        local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
        hypervisor_type='fake',
        hypervisor_version=0,
        hypervisor_hostname='fake-host',
        free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
                     _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
        free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
                      _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
        current_workload=0,
        running_vms=0,
        cpu_info='{}',
        disk_available_least=0,
        host_ip='1.1.1.1',
        supported_hv_specs=[
            objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM])
        ],
        metrics=None,
        pci_device_pools=None,
        extra_resources=None,
        stats={},
        numa_topology=None,
        cpu_allocation_ratio=16.0,
        ram_allocation_ratio=1.5,
        disk_allocation_ratio=1.0,
        ),
]

# Two flavors in primitive (dict) form, keyed by instance_type_id.
_INSTANCE_TYPE_FIXTURES = {
    1: {
        'id': 1,
        'flavorid': 'fakeid-1',
        'name': 'fake1.small',
        'memory_mb': 128,
        'vcpus': 1,
        'root_gb': 1,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
    2: {
        'id': 2,
        'flavorid': 'fakeid-2',
        'name': 'fake1.medium',
        'memory_mb': 256,
        'vcpus': 2,
        'root_gb': 5,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
}

# The same two flavors as objects.Flavor instances, for fixtures that
# need real objects (e.g. Instance.flavor/old_flavor/new_flavor).
_INSTANCE_TYPE_OBJ_FIXTURES = {
    1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
                      memory_mb=128, vcpus=1, root_gb=1,
                      ephemeral_gb=0, swap=0, rxtx_factor=0,
                      vcpu_weight=1, extra_specs={}),
    2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium',
                      memory_mb=256, vcpus=2, root_gb=5,
                      ephemeral_gb=0, swap=0, rxtx_factor=0,
                      vcpu_weight=1, extra_specs={}),
}
# 2 MiB expressed in KiB (units.Mi / units.Ki == 1024).
_2MB = 2 * units.Mi / units.Ki

# Guest NUMA topology: two one-CPU cells of 2 MiB each.
_INSTANCE_NUMA_TOPOLOGIES = {
    '2mb': objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
        objects.InstanceNUMACell(
            id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
}

# Scheduler-imposed NUMA limits with 1:1 (no overcommit) ratios.
_NUMA_LIMIT_TOPOLOGIES = {
    '2mb': objects.NUMATopologyLimits(id=0,
                                      cpu_allocation_ratio=1.0,
                                      ram_allocation_ratio=1.0),
}

# Memory pages fixture: eight 2 KB pages, none used.
_NUMA_PAGE_TOPOLOGIES = {
    '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}

# Host NUMA topology: two two-CPU cells of 2 MiB each, fully idle.
_NUMA_HOST_TOPOLOGIES = {
    '2mb': objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset=set([1, 2]), memory=_2MB,
                         cpu_usage=0, memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[], pinned_cpus=set([])),
        objects.NUMACell(id=1, cpuset=set([3, 4]), memory=_2MB,
                         cpu_usage=0, memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[], pinned_cpus=set([]))]),
}
# Two instances "on" the compute node: one ACTIVE (counted in usage and
# stats) and one DELETED (should be ignored by the tracker).
# Style fix: removed PEP 8 E251 spaces around '=' in the flavor kwargs to
# match every other fixture in this module; values are unchanged.
_INSTANCE_FIXTURES = [
    objects.Instance(
        id=1,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=None,
        os_type='fake-os',  # Used by the stats collector.
        project_id='fake-project',  # Used by the stats collector.
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        ),
    objects.Instance(
        id=2,
        host=None,
        node=None,
        uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.DELETED,
        power_state=power_state.SHUTDOWN,
        task_state=None,
        os_type='fake-os',
        project_id='fake-project-2',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        ),
]
# Migration fixtures keyed by how this compute node ('fake-host'/'fake-node')
# participates in the migration: as source, destination, both, or as the
# destination of an evacuation.
_MIGRATION_FIXTURES = {
    # A migration that has only this compute node as the source host
    'source-only': objects.Migration(
        id=1,
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        source_compute='fake-host',
        dest_compute='other-host',
        source_node='fake-node',
        dest_node='other-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        migration_type='resize',
        status='migrating'
        ),
    # A migration that has only this compute node as the dest host
    'dest-only': objects.Migration(
        id=2,
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        source_compute='other-host',
        dest_compute='fake-host',
        source_node='other-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        migration_type='resize',
        status='migrating'
        ),
    # A migration that has this compute node as both the source and dest host
    'source-and-dest': objects.Migration(
        id=3,
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        source_compute='fake-host',
        dest_compute='fake-host',
        source_node='fake-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        migration_type='resize',
        status='migrating'
        ),
    # A migration that has this compute node as destination and is an evac
    'dest-only-evac': objects.Migration(
        id=4,
        instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
        source_compute='other-host',
        dest_compute='fake-host',
        source_node='other-node',
        dest_node='fake-node',
        old_instance_type_id=2,
        new_instance_type_id=None,
        migration_type='evacuation',
        status='pre-migrating'
        ),
}
# Instances keyed by the instance_uuid values used in _MIGRATION_FIXTURES
# above, so tests can resolve a migration to its (mid-resize/evac) instance.
_MIGRATION_INSTANCE_FIXTURES = {
    # source-only
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
        id=101,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        ),
    # dest-only
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
        id=102,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        ),
    # source-and-dest
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
        id=3,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        ),
    # dest-only-evac
    # NOTE(review): id=102 duplicates the dest-only instance above --
    # presumably a copy/paste leftover; confirm no test keys on instance id.
    '077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance(
        id=102,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.REBUILDING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        ),
}

# MigrationContext fixtures keyed by instance_uuid; NUMA topologies are
# only attached where the source/dest sides above carry them.
_MIGRATION_CONTEXT_FIXTURES = {
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext(
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        migration_id=3,
        new_numa_topology=None,
        old_numa_topology=None),
    'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext(
        instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        migration_id=3,
        new_numa_topology=None,
        old_numa_topology=None),
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext(
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        migration_id=1,
        new_numa_topology=None,
        old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']),
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext(
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        migration_id=2,
        new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        old_numa_topology=None),
    # NOTE(review): migration_id=2 does not match the dest-only-evac
    # migration fixture (id=4) -- confirm whether this is intentional.
    '077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext(
        instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
        migration_id=2,
        new_numa_topology=None,
        old_numa_topology=None),
}
def overhead_zero(instance):
    """Overhead estimator stub: report zero memory overhead for any
    instance, emulating a driver that does not adjust instance memory."""
    return {'memory_mb': 0}
def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
             estimate_overhead=overhead_zero):
    """Build a ResourceTracker wired up to mock collaborators.

    :param virt_resources: Optional override of the resource representation
                           returned by the virt driver's
                           `get_available_resource()` method.
    :param estimate_overhead: Optional override of a function that should
                              return overhead of memory given an instance
                              object. Defaults to returning zero overhead.
    :returns: (tracker, scheduler client mock, virt driver mock)
    """
    sched_client = mock.MagicMock()
    notifier = mock.MagicMock()
    driver = mock.MagicMock()

    # Deep-copy so individual tests can't mutate the shared module-level
    # fixture through the driver mock.
    driver.get_available_resource.return_value = copy.deepcopy(virt_resources)
    driver.estimate_instance_overhead.side_effect = estimate_overhead

    with test.nested(
            mock.patch('nova.scheduler.client.SchedulerClient',
                       return_value=sched_client),
            mock.patch('nova.rpc.get_notifier', return_value=notifier)):
        tracker = resource_tracker.ResourceTracker(hostname, driver, nodename)
    return (tracker, sched_client, driver)
class BaseTestCase(test.NoDBTestCase):
    """Shared scaffolding: builds a mocked-out ResourceTracker and keeps
    references to its scheduler-client and virt-driver mocks."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.rt = None
        self.flags(my_ip='1.1.1.1')

    def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
                  estimate_overhead=overhead_zero):
        tracker, sched_client, driver = setup_rt(
            'fake-host', 'fake-node', virt_resources, estimate_overhead)
        self.rt = tracker
        self.sched_client_mock = sched_client
        self.driver_mock = driver
class TestUpdateAvailableResources(BaseTestCase):
    def _update_available_resources(self):
        """Run RT.update_available_resource() with _update() mocked out and
        return the _update mock for assertions."""
        # We test RT._update separately, since the complexity
        # of the update_available_resource() function is high enough as
        # it is, we just want to focus here on testing the resources
        # parameter that update_available_resource() eventually passes
        # to _update().
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.update_available_resource(mock.sentinel.ctx)
        return update_mock
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
                                                    get_cn_mock):
        """With no instances, no migrations and nothing reserved, the
        tracker should report the virt driver's resources unchanged.

        Mock decorators apply bottom-up: get_mock patches InstanceList,
        migr_mock patches MigrationList, get_cn_mock patches ComputeNode.
        """
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        # Empty hypervisor: no instances, no in-progress migrations.
        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        vd = self.driver_mock
        vd.get_available_resource.assert_called_once_with('fake-node')
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node',
                                         expected_attrs=[
                                             'system_metadata',
                                             'numa_topology',
                                             'flavor',
                                             'migration_context'])
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                          'fake-node')

        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_reserved_disk_and_ram(
            self, get_mock, migr_mock, get_cn_mock):
        """Reserved host disk/RAM must be subtracted from the free values
        and counted as used, even with no instances or migrations.

        Mock decorators apply bottom-up (see the test above).
        """
        self.flags(reserved_host_disk_mb=1024,
                   reserved_host_memory_mb=512)
        self._setup_rt()

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6GB avail - 1 GB reserved
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 0,  # 512MB avail - 512MB reserved
            'memory_mb_used': 512,  # 0MB used + 512MB reserved
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,  # 0GB used + 1 GB reserved
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_no_migrations(self, get_mock, migr_mock,
                                          get_cn_mock):
        """Usage from DB instances must be reflected in the computed
        resources; only the ACTIVE fixture counts towards running_vms.

        Mock decorators apply bottom-up (see the first test above).
        """
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=1,
                              memory_mb_used=128,
                              local_gb_used=1)
        self._setup_rt(virt_resources=virt_resources)

        get_mock.return_value = _INSTANCE_FIXTURES
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6 - 1 used
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 - 128 used
            'memory_mb_used': 128,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 1,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 1  # One active instance
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
                                              get_cn_mock):
        """Audit when only an orphaned instance consumes resources.

        Only the orphan's memory usage should show up in the tracked
        totals; it is not counted as a running VM.
        """
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(memory_mb_used=64)
        self._setup_rt(virt_resources=virt_resources)
        # The database knows of no instances or migrations on this host.
        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        # Orphaned instances are those that the virt driver has on
        # record as consuming resources on the compute node, but the
        # Nova database has no record of the instance being active
        # on the host. For some reason, the resource tracker only
        # considers orphaned instance's memory usage in its calculations
        # of free resources...
        orphaned_usages = {
            '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
                # Yes, the return result format of get_per_instance_usage
                # is indeed this stupid and redundant. Also note that the
                # libvirt driver just returns an empty dict always for this
                # method and so who the heck knows whether this stuff
                # actually works.
                'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
                'memory_mb': 64
            }
        }
        vd = self.driver_mock
        vd.get_per_instance_usage.return_value = orphaned_usages
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 448,  # 512 - 64 orphaned usage
            'memory_mb_used': 64,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            # Yep, for some reason, orphaned instances are not counted
            # as running VMs...
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_source_migration(self, get_mock, get_inst_mock,
                                           migr_mock, get_cn_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the source host not the destination host, and the resource
        # tracker does not have any instances assigned to it. This is
        # the case when a migration from this compute host to another
        # has been completed, but the user has not confirmed the resize
        # yet, so the resource tracker must continue to keep the resources
        # for the original instance type available on the source compute
        # node in case of a revert of the resize.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=4,
                              memory_mb_used=128,
                              local_gb_used=1)
        self._setup_rt(virt_resources=virt_resources)
        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['source-only']
        migr_mock.return_value = [migr_obj]
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        # Migration.instance property is accessed in the migration
        # processing code, and this property calls
        # objects.Instance.get_by_uuid, so we mock that call to return
        # the corresponding fixture instance.
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 total - 128 for possible revert of orig
            'memory_mb_used': 128,  # 128 possible revert amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 1,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
                                         migr_mock, get_cn_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host not the source host, and the resource
        # tracker does not yet have any instances assigned to it. This is
        # the case when a migration to this compute host from another host
        # is in progress, but the user has not confirmed the resize
        # yet, so the resource tracker must reserve the resources
        # for the possibly-to-be-confirmed instance's instance type
        # node in case of a confirm of the resize.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=2,
                              memory_mb_used=256,
                              local_gb_used=5)
        self._setup_rt(virt_resources=virt_resources)
        get_mock.return_value = []
        # An incoming (dest-only) resize migration for this node.
        migr_obj = _MIGRATION_FIXTURES['dest-only']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 1,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 256,  # 512 total - 256 for possible confirm of new
            'memory_mb_used': 256,  # 256 possible confirmed amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 2,
            'hypervisor_type': 'fake',
            'local_gb_used': 5,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock,
                                          migr_mock, get_cn_mock):
        # We test the behavior of update_available_resource() when
        # there is an active evacuation that involves this compute node
        # as the destination host not the source host, and the resource
        # tracker does not yet have any instances assigned to it. This is
        # the case when a migration to this compute host from another host
        # is in progress, but not finished yet.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=2,
                              memory_mb_used=256,
                              local_gb_used=5)
        self._setup_rt(virt_resources=virt_resources)
        get_mock.return_value = []
        # An incoming evacuation targeting this node.
        migr_obj = _MIGRATION_FIXTURES['dest-only-evac']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 1,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 256,  # 512 total - 256 for possible confirm of new
            'memory_mb_used': 256,  # 256 possible confirmed amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 2,
            'hypervisor_type': 'fake',
            'local_gb_used': 5,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
                return_value=None)
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_source_and_dest_migration(self, get_mock,
                                                      get_inst_mock, migr_mock,
                                                      get_cn_mock,
                                                      get_mig_ctxt_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host AND the source host, and the resource
        # tracker has a few instances assigned to it, including the
        # instance that is resizing to this same compute node. The tracking
        # of resource amounts takes into account both the old and new
        # resize instance types as taking up space on the node.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        # Setup virt resources to match used resources to number
        # of defined instances on the hypervisor
        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        virt_resources.update(vcpus_used=4,
                              memory_mb_used=512,
                              local_gb_used=7)
        self._setup_rt(virt_resources=virt_resources)
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        # The resizing instance has already had its instance type
        # changed to the *new* instance type (the bigger one, instance type 2)
        resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        resizing_instance.migration_context = (
            _MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid])
        all_instances = _INSTANCE_FIXTURES + [resizing_instance]
        get_mock.return_value = all_instances
        get_inst_mock.return_value = resizing_instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            # 6 total - 1G existing - 5G new flav - 1G old flav
            'free_disk_gb': -1,
            'hypervisor_version': 0,
            'local_gb': 6,
            # 512 total - 128 existing - 256 new flav - 128 old flav
            'free_ram_mb': 0,
            'memory_mb_used': 512,  # 128 exist + 256 new flav + 128 old flav
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 4,
            'hypervisor_type': 'fake',
            'local_gb_used': 7,  # 1G existing, 5G new flav + 1 old flav
            'memory_mb': 512,
            'current_workload': 1,  # One migrating instance...
            'vcpus': 4,
            'running_vms': 2
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
class TestInitComputeNode(BaseTestCase):
    """Tests for ResourceTracker._init_compute_node().

    Covers the three paths: node already cached on the tracker (no-op),
    node found in the database, and node created from scratch when the
    database lookup raises NotFound.
    """
    @mock.patch('nova.objects.ComputeNode.create')
    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_no_op_init_compute_node(self, get_mock, service_mock,
                                     create_mock):
        """If the tracker already has a compute node, no DB calls happen."""
        self._setup_rt()
        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.compute_node = compute_node
        self.rt._init_compute_node(mock.sentinel.ctx, resources)
        self.assertFalse(service_mock.called)
        self.assertFalse(get_mock.called)
        self.assertFalse(create_mock.called)
        self.assertFalse(self.rt.disabled)
    @mock.patch('nova.objects.ComputeNode.create')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_compute_node_loaded(self, get_mock, create_mock):
        """An existing DB record is loaded; no new node is created."""
        self._setup_rt()
        def fake_get_node(_ctx, host, node):
            # Fake DB lookup: always return a fresh copy of the fixture.
            res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
            return res
        get_mock.side_effect = fake_get_node
        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        self.rt._init_compute_node(mock.sentinel.ctx, resources)
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node')
        self.assertFalse(create_mock.called)
        self.assertFalse(self.rt.disabled)
    @mock.patch('nova.objects.ComputeNode.create')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_compute_node_created_on_empty(self, get_mock, create_mock):
        """When the DB lookup raises NotFound, a new node is created from
        the supplied virt resources plus the configured allocation ratios.
        """
        self._setup_rt()
        get_mock.side_effect = exc.NotFound
        cpu_alloc_ratio = 1.0
        ram_alloc_ratio = 1.0
        disk_alloc_ratio = 1.0
        resources = {
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0,
            'pci_passthrough_devices': '[]'
        }
        # The expected compute represents the initial values used
        # when creating a compute node.
        expected_compute = objects.ComputeNode(
            host_ip=resources['host_ip'],
            vcpus=resources['vcpus'],
            memory_mb=resources['memory_mb'],
            local_gb=resources['local_gb'],
            cpu_info=resources['cpu_info'],
            vcpus_used=resources['vcpus_used'],
            memory_mb_used=resources['memory_mb_used'],
            local_gb_used=resources['local_gb_used'],
            numa_topology=resources['numa_topology'],
            hypervisor_type=resources['hypervisor_type'],
            hypervisor_version=resources['hypervisor_version'],
            hypervisor_hostname=resources['hypervisor_hostname'],
            # NOTE(sbauza): ResourceTracker adds host field
            host='fake-host',
            # NOTE(sbauza): ResourceTracker adds CONF allocation ratios
            ram_allocation_ratio=ram_alloc_ratio,
            cpu_allocation_ratio=cpu_alloc_ratio,
            disk_allocation_ratio=disk_alloc_ratio,
        )
        # Forcing the flags to the values we know
        self.rt.ram_allocation_ratio = ram_alloc_ratio
        self.rt.cpu_allocation_ratio = cpu_alloc_ratio
        self.rt.disk_allocation_ratio = disk_alloc_ratio
        self.rt._init_compute_node(mock.sentinel.ctx, resources)
        self.assertFalse(self.rt.disabled)
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node')
        create_mock.assert_called_once_with()
        self.assertTrue(obj_base.obj_equal_prims(expected_compute,
                                                 self.rt.compute_node))
    def test_copy_resources_adds_allocation_ratios(self):
        """_copy_resources() must stamp the CONF allocation ratios onto
        the tracked compute node.
        """
        self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0,
                   disk_allocation_ratio=2.0)
        self._setup_rt()
        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.compute_node = compute_node
        self.rt._copy_resources(resources)
        self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio)
        self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio)
        self.assertEqual(2.0, self.rt.compute_node.disk_allocation_ratio)
class TestUpdateComputeNode(BaseTestCase):
    """Tests for ResourceTracker._update().

    Verifies the scheduler client is only notified when the compute node
    resources actually change.
    """
    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_existing_compute_node_updated_same_resources(self, service_mock):
        self._setup_rt()
        # This is the same set of resources as the fixture, deliberately. We
        # are checking below to see that update_resource_stats() is not
        # needlessly called when the resources don't actually change.
        compute = objects.ComputeNode(
            host_ip='1.1.1.1',
            numa_topology=None,
            metrics='[]',
            cpu_info='',
            hypervisor_hostname='fakehost',
            free_disk_gb=6,
            hypervisor_version=0,
            local_gb=6,
            free_ram_mb=512,
            memory_mb_used=0,
            pci_device_pools=objects.PciDevicePoolList(),
            vcpus_used=0,
            hypervisor_type='fake',
            local_gb_used=0,
            memory_mb=512,
            current_workload=0,
            vcpus=4,
            running_vms=0,
            cpu_allocation_ratio=16.0,
            ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0,
        )
        self.rt.compute_node = compute
        self.rt._update(mock.sentinel.ctx)
        self.assertFalse(self.rt.disabled)
        self.assertFalse(service_mock.called)
        # The above call to _update() will populate the
        # RT.old_resources collection with the resources. Here, we check that
        # if we call _update() again with the same resources, that
        # the scheduler client won't be called again to update those
        # (unchanged) resources for the compute node
        self.sched_client_mock.reset_mock()
        urs_mock = self.sched_client_mock.update_resource_stats
        self.rt._update(mock.sentinel.ctx)
        self.assertFalse(urs_mock.called)
    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_existing_compute_node_updated_new_resources(self, service_mock):
        self._setup_rt()
        # Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
        # below to be different from the compute node fixture's base usages.
        # We want to check that the code paths update the stored compute node
        # usage records with what is supplied to _update().
        compute = objects.ComputeNode(
            host='fake-host',
            host_ip='1.1.1.1',
            numa_topology=None,
            metrics='[]',
            cpu_info='',
            hypervisor_hostname='fakehost',
            free_disk_gb=2,
            hypervisor_version=0,
            local_gb=6,
            free_ram_mb=384,
            memory_mb_used=128,
            pci_device_pools=objects.PciDevicePoolList(),
            vcpus_used=2,
            hypervisor_type='fake',
            local_gb_used=4,
            memory_mb=512,
            current_workload=0,
            vcpus=4,
            running_vms=0,
            cpu_allocation_ratio=16.0,
            ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0,
        )
        self.rt.compute_node = compute
        self.rt._update(mock.sentinel.ctx)
        self.assertFalse(self.rt.disabled)
        self.assertFalse(service_mock.called)
        # Changed resources -> the scheduler client must be notified once.
        urs_mock = self.sched_client_mock.update_resource_stats
        urs_mock.assert_called_once_with(self.rt.compute_node)
class TestInstanceClaim(BaseTestCase):
    """Tests for ResourceTracker.instance_claim() and update_usage().

    Exercises successful claims, claims with PCI devices, claim aborts
    (both explicit and via context-manager exception), limit rejection,
    and NUMA-aware claims.
    """
    def setUp(self):
        super(TestInstanceClaim, self).setUp()
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        # not using mock.sentinel.ctx because instance_claim calls #elevated
        self.ctx = mock.MagicMock()
        self.elevated = mock.MagicMock()
        self.ctx.elevated.return_value = self.elevated
        self.instance = _INSTANCE_FIXTURES[0].obj_clone()
    def assertEqualNUMAHostTopology(self, expected, got):
        """Compare two NUMA host topologies cell by cell on the attrs
        that matter for claim accounting; raise AssertionError on any
        difference (including one side being None).
        """
        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
        if None in (expected, got):
            if expected != got:
                raise AssertionError("Topologies don't match. Expected: "
                                     "%(expected)s, but got: %(got)s" %
                                     {'expected': expected, 'got': got})
            else:
                return
        if len(expected) != len(got):
            raise AssertionError("Topologies don't match due to different "
                                 "number of cells. Expected: "
                                 "%(expected)s, but got: %(got)s" %
                                 {'expected': expected, 'got': got})
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            for attr in attrs:
                if getattr(exp_cell, attr) != getattr(got_cell, attr):
                    raise AssertionError("Topologies don't match. Expected: "
                                         "%(expected)s, but got: %(got)s" %
                                         {'expected': expected, 'got': got})
    def test_claim_disabled(self):
        """With no compute node the tracker is disabled and the claim is a
        no-op NopClaim, but the instance host/node fields are still set.
        """
        self.rt.compute_node = None
        self.assertTrue(self.rt.disabled)
        with mock.patch.object(self.instance, 'save'):
            claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
                                           None)
        self.assertEqual(self.rt.host, self.instance.host)
        self.assertEqual(self.rt.host, self.instance.launched_on)
        self.assertEqual(self.rt.nodename, self.instance.node)
        self.assertIsInstance(claim, claims.NopClaim)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_update_usage_with_claim(self, migr_mock, pci_mock):
        # Test that RT.update_usage() only changes the compute node
        # resources if there has been a claim first.
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.update_usage(self.ctx, self.instance)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            'vcpus_used': 1,
            'pci_device_pools': objects.PciDevicePoolList(),
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated)
            self.assertTrue(obj_base.obj_equal_prims(expected,
                                                     self.rt.compute_node))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_update_usage_removed(self, migr_mock, pci_mock):
        # Test that RT.update_usage() removes the instance when update is
        # called in a removed state
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            'vcpus_used': 1,
            'pci_device_pools': objects.PciDevicePoolList(),
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated)
            self.assertTrue(obj_base.obj_equal_prims(expected,
                                                     self.rt.compute_node))
        expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_updated['pci_device_pools'] = objects.PciDevicePoolList()
        # SHELVED_OFFLOADED counts as "removed": usage should revert to
        # the base fixture values.
        self.instance.vm_state = vm_states.SHELVED_OFFLOADED
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.update_usage(self.ctx, self.instance)
        self.assertTrue(obj_base.obj_equal_prims(expected_updated,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim(self, migr_mock, pci_mock):
        """A plain claim consumes the instance's disk, RAM and vCPUs."""
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            'vcpus_used': 1,
            'pci_device_pools': objects.PciDevicePoolList(),
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated)
            self.assertTrue(obj_base.obj_equal_prims(expected,
                                                     self.rt.compute_node))
    @mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
                return_value=True)
    @mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_with_pci(self, migr_mock, pci_mock,
                            pci_manager_mock, pci_stats_mock):
        # Test that a claim involving PCI requests correctly claims
        # PCI devices on the host and sends an updated pci_device_pools
        # attribute of the ComputeNode object.
        self.assertFalse(self.rt.disabled)
        # TODO(jaypipes): Remove once the PCI tracker is always created
        # upon the resource tracker being initialized...
        self.rt.pci_tracker = pci_manager.PciDevTracker(mock.sentinel.ctx)
        pci_pools = objects.PciDevicePoolList()
        pci_manager_mock.return_value = pci_pools
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        pci_mock.return_value = objects.InstancePCIRequests(requests=[request])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            'vcpus_used': 1,
            'pci_device_pools': pci_pools
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated)
            pci_manager_mock.assert_called_once_with(mock.ANY,  # context...
                                                     pci_mock.return_value,
                                                     None)
            self.assertTrue(obj_base.obj_equal_prims(expected,
                                                     self.rt.compute_node))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_abort_context_manager(self, migr_mock, pci_mock):
        """Raising inside the claim context manager must trigger abort()
        and return all claimed resources to the tracker.
        """
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        self.assertEqual(0, self.rt.compute_node.local_gb_used)
        self.assertEqual(0, self.rt.compute_node.memory_mb_used)
        self.assertEqual(0, self.rt.compute_node.running_vms)
        mock_save = mock.MagicMock()
        mock_clear_numa = mock.MagicMock()
        @mock.patch.object(self.instance, 'save', mock_save)
        @mock.patch.object(self.instance, 'clear_numa_topology',
                           mock_clear_numa)
        @mock.patch.object(objects.Instance, 'obj_clone',
                           return_value=self.instance)
        def _doit(mock_clone):
            with self.rt.instance_claim(self.ctx, self.instance, None):
                # Raise an exception. Just make sure below that the abort()
                # method of the claim object was called (and the resulting
                # resources reset to the pre-claimed amounts)
                raise test.TestingException()
        self.assertRaises(test.TestingException, _doit)
        self.assertEqual(2, mock_save.call_count)
        mock_clear_numa.assert_called_once_with()
        self.assertIsNone(self.instance.host)
        self.assertIsNone(self.instance.node)
        # Assert that the resources claimed by the Claim() constructor
        # are returned to the resource tracker due to the claim's abort()
        # method being called when triggered by the exception raised above.
        self.assertEqual(0, self.rt.compute_node.local_gb_used)
        self.assertEqual(0, self.rt.compute_node.memory_mb_used)
        self.assertEqual(0, self.rt.compute_node.running_vms)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_abort(self, migr_mock, pci_mock):
        """An explicit claim.abort() must return the claimed resources."""
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        @mock.patch.object(objects.Instance, 'obj_clone',
                           return_value=self.instance)
        @mock.patch.object(self.instance, 'save')
        def _claim(mock_save, mock_clone):
            return self.rt.instance_claim(self.ctx, self.instance, None)
        claim = _claim()
        self.assertEqual(disk_used, self.rt.compute_node.local_gb_used)
        self.assertEqual(self.instance.memory_mb,
                         self.rt.compute_node.memory_mb_used)
        self.assertEqual(1, self.rt.compute_node.running_vms)
        mock_save = mock.MagicMock()
        mock_clear_numa = mock.MagicMock()
        @mock.patch.object(self.instance, 'save', mock_save)
        @mock.patch.object(self.instance, 'clear_numa_topology',
                           mock_clear_numa)
        def _abort():
            claim.abort()
        _abort()
        mock_save.assert_called_once_with()
        mock_clear_numa.assert_called_once_with()
        self.assertIsNone(self.instance.host)
        self.assertIsNone(self.instance.node)
        self.assertEqual(0, self.rt.compute_node.local_gb_used)
        self.assertEqual(0, self.rt.compute_node.memory_mb_used)
        self.assertEqual(0, self.rt.compute_node.running_vms)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_limits(self, migr_mock, pci_mock):
        """Zeroing any one limit must make the claim raise
        ComputeResourcesUnavailable.
        """
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        good_limits = {
            'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'],
            'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'],
            'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'],
        }
        for key in good_limits.keys():
            bad_limits = copy.deepcopy(good_limits)
            bad_limits[key] = 0
            self.assertRaises(exc.ComputeResourcesUnavailable,
                              self.rt.instance_claim,
                              self.ctx, self.instance, bad_limits)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_numa(self, migr_mock, pci_mock):
        """A NUMA-aware claim must bump per-cell cpu/memory usage on the
        host topology stored on the compute node.
        """
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb']
        host_topology = _NUMA_HOST_TOPOLOGIES['2mb']
        self.rt.compute_node['numa_topology'] = host_topology._to_json()
        limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']}
        expected_numa = copy.deepcopy(host_topology)
        for cell in expected_numa.cells:
            cell.memory_usage += _2MB
            cell.cpu_usage += 1
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, limits)
            update_mock.assert_called_once_with(self.ctx.elevated())
            updated_compute_node = self.rt.compute_node
            new_numa = updated_compute_node['numa_topology']
            new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
            self.assertEqualNUMAHostTopology(expected_numa, new_numa)
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
class TestMoveClaim(BaseTestCase):
    def setUp(self):
        """Prime the resource tracker with an empty audit so move-claim
        tests start from the base compute node fixture.
        """
        super(TestMoveClaim, self).setUp()
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.instance = _INSTANCE_FIXTURES[0].obj_clone()
        self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1]
        self.limits = {}
        # not using mock.sentinel.ctx because resize_claim calls
        # context.elevated(), which a sentinel cannot provide
        self.ctx = mock.MagicMock()
        self.elevated = mock.MagicMock()
        self.ctx.elevated.return_value = self.elevated
        # Initialise extensible resource trackers
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        with test.nested(
            mock.patch('nova.objects.InstanceList.get_by_host_and_node'),
            mock.patch('nova.objects.MigrationList.'
                       'get_in_progress_by_host_and_node')
        ) as (inst_list_mock, migr_mock):
            # Run one audit with no instances/migrations so the tracker
            # starts from a clean baseline.
            inst_list_mock.return_value = objects.InstanceList(objects=[])
            migr_mock.return_value = objects.MigrationList(objects=[])
            self.rt.update_available_resource(self.ctx)
def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.inst_list_mock = inst_list_mock
self.inst_by_uuid = inst_by_uuid
self.migr_mock = migr_mock
self.inst_save_mock = inst_save_mock
def audit(self, rt, instances, migrations, migr_inst):
self.inst_list_mock.return_value = \
objects.InstanceList(objects=instances)
self.migr_mock.return_value = \
objects.MigrationList(objects=migrations)
self.inst_by_uuid.return_value = migr_inst
rt.update_available_resource(self.ctx)
def assertEqual(self, expected, actual):
if type(expected) != dict or type(actual) != dict:
super(TestMoveClaim, self).assertEqual(expected, actual)
return
fail = False
for k, e in expected.items():
a = actual[k]
if e != a:
print("%s: %s != %s" % (k, e, a))
fail = True
if fail:
self.fail()
def adjust_expected(self, expected, flavor):
disk_used = flavor['root_gb'] + flavor['ephemeral_gb']
expected.free_disk_gb -= disk_used
expected.local_gb_used += disk_used
expected.free_ram_mb -= flavor['memory_mb']
expected.memory_mb_used += flavor['memory_mb']
expected.vcpus_used += flavor['vcpus']
@mock.patch('nova.objects.Flavor.get_by_id')
def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
"""Resize self.instance and check that the expected quantities of each
resource have been consumed.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
self.driver_mock.get_host_ip_addr.return_value = "fake-ip"
flavor_mock.return_value = objects.Flavor(**self.flavor)
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
expected = copy.deepcopy(self.rt.compute_node)
self.adjust_expected(expected, self.flavor)
create_mig_mock = mock.patch.object(self.rt, '_create_migration')
mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
return_value=mig_context_obj)
with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
migr_mock.return_value = _MIGRATION_FIXTURES['source-only']
claim = self.rt.resize_claim(
self.ctx, self.instance, self.flavor, None)
self.assertEqual(1, ctxt_mock.call_count)
self.assertIsInstance(claim, claims.MoveClaim)
inst_save_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_claim_abort(self, pci_mock, inst_list_mock,
inst_by_uuid, migr_mock, inst_save_mock):
# Resize self.instance and check that the expected quantities of each
# resource have been consumed. The abort the resize claim and check
# that the resources have been set back to their original values.
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
self.driver_mock.get_host_ip_addr.return_value = "fake-host"
migr_obj = _MIGRATION_FIXTURES['dest-only']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
with mock.patch.object(self.rt, '_create_migration') as migr_mock:
migr_mock.return_value = migr_obj
claim = self.rt.resize_claim(
self.ctx, self.instance, self.flavor, None)
self.assertIsInstance(claim, claims.MoveClaim)
self.assertEqual(5, self.rt.compute_node.local_gb_used)
self.assertEqual(256, self.rt.compute_node.memory_mb_used)
self.assertEqual(1, len(self.rt.tracked_migrations))
with mock.patch('nova.objects.Instance.'
'drop_migration_context') as drop_migr_mock:
claim.abort()
drop_migr_mock.assert_called_once_with()
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, len(self.rt.tracked_migrations))
def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
"""Resize self.instance to the same host but with a different flavor.
Then abort the claim. Check that the same amount of resources are
available afterwards as we started with.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance._context = self.ctx
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
expected = copy.deepcopy(self.rt.compute_node)
create_mig_mock = mock.patch.object(self.rt, '_create_migration')
mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
return_value=mig_context_obj)
with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
migr_mock.return_value = migr_obj
claim = self.rt.resize_claim(self.ctx, self.instance,
_INSTANCE_TYPE_OBJ_FIXTURES[1], None)
self.assertEqual(1, ctxt_mock.call_count)
self.audit(self.rt, [self.instance], [migr_obj], self.instance)
inst_save_mock.assert_called_once_with()
self.assertNotEqual(expected, self.rt.compute_node)
claim.instance.migration_context = mig_context_obj
with mock.patch('nova.objects.MigrationContext._destroy') as destroy_m:
claim.abort()
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
destroy_m.assert_called_once_with(self.ctx, claim.instance.uuid)
def test_revert_reserve_source(
self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock):
"""Check that the source node of an instance migration reserves
resources until the migration has completed, even if the migration is
reverted.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
# Get our migrations, instances and itypes in a row
src_migr = _MIGRATION_FIXTURES['source-only']
src_instance = (
_MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']].obj_clone()
)
src_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[src_instance.uuid])
old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']]
dst_migr = _MIGRATION_FIXTURES['dest-only']
dst_instance = (
_MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']].obj_clone()
)
new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']]
dst_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[dst_instance.uuid])
# Set up the destination resource tracker
# update_available_resource to initialise extensible resource trackers
src_rt = self.rt
(dst_rt, _, _) = setup_rt("other-host", "other-node")
dst_rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
inst_list_mock.return_value = objects.InstanceList(objects=[])
dst_rt.update_available_resource(self.ctx)
# Register the instance with dst_rt
expected = copy.deepcopy(dst_rt.compute_node)
with mock.patch.object(dst_instance, 'save'):
dst_rt.instance_claim(self.ctx, dst_instance)
self.adjust_expected(expected, new_itype)
expected.stats = {'num_task_resize_migrating': 1,
'io_workload': 1,
'num_instances': 1,
'num_proj_fake-project': 1,
'num_vm_active': 1,
'num_os_type_fake-os': 1}
expected.current_workload = 1
expected.running_vms = 1
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Provide the migration via a mock, then audit dst_rt to check that
# the instance + migration resources are not double-counted
self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Audit src_rt with src_migr
expected = copy.deepcopy(src_rt.compute_node)
self.adjust_expected(expected, old_itype)
self.audit(src_rt, [], [src_migr], src_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
# Flag the instance as reverting and re-audit
src_instance['vm_state'] = vm_states.RESIZED
src_instance['task_state'] = task_states.RESIZE_REVERTING
self.audit(src_rt, [], [src_migr], src_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
def test_update_available_resources_migration_no_context(self, pci_mock,
inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock):
"""When migrating onto older nodes - it is possible for the
migration_context record to be missing. Confirm resource audit works
regardless.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance.migration_context = None
expected = copy.deepcopy(self.rt.compute_node)
self.adjust_expected(expected, self.flavor)
self.audit(self.rt, [], [migr_obj], self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
# This is good enough to prevent a lazy-load; value is unimportant
migr_obj['updated_at'] = None
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[self.instance.uuid])
self.audit(self.rt, [], [migr_obj, migr_obj], self.instance)
self.assertEqual(1, len(self.rt.tracked_migrations))
class TestInstanceInResizeState(test.NoDBTestCase):
    """Unit tests for resource_tracker._instance_in_resize_state()."""

    @staticmethod
    def _in_resize_state(vm_state, task_state):
        # Build a minimal Instance carrying only the two state fields and
        # feed it to the predicate under test.
        fake_instance = objects.Instance(vm_state=vm_state,
                                         task_state=task_state)
        return resource_tracker._instance_in_resize_state(fake_instance)

    def test_active_suspending(self):
        # ACTIVE + SUSPENDING is not considered a resize.
        self.assertFalse(
            self._in_resize_state(vm_states.ACTIVE, task_states.SUSPENDING))

    def test_resized_suspending(self):
        # RESIZED + SUSPENDING is reported as being in a resize state.
        self.assertTrue(
            self._in_resize_state(vm_states.RESIZED, task_states.SUSPENDING))

    def test_resized_resize_migrating(self):
        self.assertTrue(
            self._in_resize_state(vm_states.RESIZED,
                                  task_states.RESIZE_MIGRATING))

    def test_resized_resize_finish(self):
        self.assertTrue(
            self._in_resize_state(vm_states.RESIZED,
                                  task_states.RESIZE_FINISH))
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from saharaclient.api import base as sahara_base
from rally import consts
from rally import exceptions
from rally.plugins.openstack.scenarios.sahara import utils
from tests.unit import test
CONF = cfg.CONF
SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils"
class SaharaScenarioTestCase(test.ScenarioTestCase):
    """Tests for the Sahara scenario utility methods."""
    # NOTE(stpierre): the Sahara utils generally do funny stuff with
    # wait_for() calls -- frequently the is_ready and
    # update_resource arguments are functions defined in the Sahara
    # utils themselves instead of the more standard resource_is() and
    # get_from_manager() calls. As a result, the tests below do more
    # integrated/functional testing of wait_for() calls, and we can't
    # just mock out wait_for and friends the way we usually do.
    patch_benchmark_utils = False

    def setUp(self):
        super(SaharaScenarioTestCase, self).setUp()

        # Zero the polling intervals so status-polling loops in the utils
        # do not sleep between checks during the tests.
        CONF.set_override("sahara_cluster_check_interval", 0, "benchmark",
                          enforce_type=True)
        CONF.set_override("sahara_job_check_interval", 0, "benchmark",
                          enforce_type=True)

    def test_list_node_group_templates(self):
        """_list_node_group_templates returns the client's list as-is."""
        ngts = []
        self.clients("sahara").node_group_templates.list.return_value = ngts

        scenario = utils.SaharaScenario(self.context)
        return_ngts_list = scenario._list_node_group_templates()
        self.assertEqual(ngts, return_ngts_list)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.list_node_group_templates")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_create_node_group_templates(
            self, mock_sahara_consts,
            mock_generate_random_name):
        """Master and worker templates are created with the right processes."""
        scenario = utils.SaharaScenario(self.context)
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }
        mock_sahara_consts.NODE_PROCESSES = mock_processes

        scenario._create_master_node_group_template(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version"
        )
        scenario._create_worker_node_group_template(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version"
        )

        # One create call per template; master gets "p1", worker gets "p2".
        create_calls = [
            mock.call(
                name="random_name",
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                node_processes=["p1"]),
            mock.call(
                name="random_name",
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                node_processes=["p2"]
            )]
        self.clients("sahara").node_group_templates.create.assert_has_calls(
            create_calls)

        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "sahara.create_master_node_group_template")
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "sahara.create_worker_node_group_template")

    def test_delete_node_group_templates(self):
        """_delete_node_group_template deletes the template by its id."""
        scenario = utils.SaharaScenario(self.context)
        ng = mock.MagicMock(id=42)

        scenario._delete_node_group_template(ng)

        delete_mock = self.clients("sahara").node_group_templates.delete
        delete_mock.assert_called_once_with(42)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.delete_node_group_template")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_launch_cluster(self, mock_sahara_consts,
                            mock_generate_random_name):
        """_launch_cluster builds the expected node groups and create call."""
        self.context.update({
            "tenant": {
                "networks": [
                    {
                        "id": "test_neutron_id",
                        "router_id": "test_router_id"
                    }
                ]
            }
        })

        self.clients("services").values.return_value = [
            consts.Service.NEUTRON
        ]

        scenario = utils.SaharaScenario(context=self.context)

        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        floating_ip_pool_uuid = uuidutils.generate_uuid()
        node_groups = [
            {
                "name": "master-ng",
                "flavor_id": "test_flavor_m",
                "node_processes": ["p1"],
                "floating_ip_pool": floating_ip_pool_uuid,
                "count": 1,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
            }, {
                "name": "worker-ng",
                "flavor_id": "test_flavor_w",
                "node_processes": ["p2"],
                "floating_ip_pool": floating_ip_pool_uuid,
                "volumes_per_node": 5,
                "volumes_size": 10,
                "count": 42,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
            }
        ]

        mock_sahara_consts.NODE_PROCESSES = mock_processes
        mock_sahara_consts.REPLICATION_CONFIGS = mock_configs

        self.clients("sahara").clusters.create.return_value.id = (
            "test_cluster_id")

        # Report the cluster as active immediately so the wait loop exits.
        self.clients("sahara").clusters.get.return_value.status = (
            "active")

        scenario._launch_cluster(
            plugin_name="test_plugin",
            hadoop_version="test_version",
            master_flavor_id="test_flavor_m",
            worker_flavor_id="test_flavor_w",
            image_id="test_image",
            floating_ip_pool=floating_ip_pool_uuid,
            volumes_per_node=5,
            volumes_size=10,
            auto_security_group=True,
            security_groups=["g1", "g2"],
            workers_count=42,
            node_configs={"HDFS": {"local_config": "local_value"}}
        )

        self.clients("sahara").clusters.create.assert_called_once_with(
            name="random_name",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            node_groups=node_groups,
            default_image_id="test_image",
            cluster_configs={"HDFS": {"dfs.replication": 3}},
            net_id="test_neutron_id",
            anti_affinity=None
        )

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.launch_cluster")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_launch_cluster_with_proxy(self, mock_sahara_consts,
                                       mock_generate_random_name):
        """With enable_proxy=True a proxy node group carries the floating IPs.

        The 42 workers are split into a 40-node worker group without a
        floating IP pool and a 2-node proxy group with one.
        """
        context = {
            "tenant": {
                "networks": [
                    {
                        "id": "test_neutron_id",
                        "router_id": "test_router_id"
                    }
                ]
            }
        }

        self.clients("services").values.return_value = [
            consts.Service.NEUTRON
        ]

        scenario = utils.SaharaScenario(context=context)

        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        floating_ip_pool_uuid = uuidutils.generate_uuid()
        node_groups = [
            {
                "name": "master-ng",
                "flavor_id": "test_flavor_m",
                "node_processes": ["p1"],
                "floating_ip_pool": floating_ip_pool_uuid,
                "count": 1,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
                "is_proxy_gateway": True
            }, {
                "name": "worker-ng",
                "flavor_id": "test_flavor_w",
                "node_processes": ["p2"],
                "volumes_per_node": 5,
                "volumes_size": 10,
                "count": 40,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
            }, {
                "name": "proxy-ng",
                "flavor_id": "test_flavor_w",
                "node_processes": ["p2"],
                "floating_ip_pool": floating_ip_pool_uuid,
                "volumes_per_node": 5,
                "volumes_size": 10,
                "count": 2,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
                "is_proxy_gateway": True
            }
        ]

        mock_sahara_consts.NODE_PROCESSES = mock_processes
        mock_sahara_consts.REPLICATION_CONFIGS = mock_configs

        self.clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="active")

        scenario._launch_cluster(
            plugin_name="test_plugin",
            hadoop_version="test_version",
            master_flavor_id="test_flavor_m",
            worker_flavor_id="test_flavor_w",
            image_id="test_image",
            floating_ip_pool=floating_ip_pool_uuid,
            volumes_per_node=5,
            volumes_size=10,
            auto_security_group=True,
            security_groups=["g1", "g2"],
            workers_count=42,
            node_configs={"HDFS": {"local_config": "local_value"}},
            enable_proxy=True
        )

        self.clients("sahara").clusters.create.assert_called_once_with(
            name="random_name",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            node_groups=node_groups,
            default_image_id="test_image",
            cluster_configs={"HDFS": {"dfs.replication": 3}},
            net_id="test_neutron_id",
            anti_affinity=None
        )

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.launch_cluster")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_launch_cluster_error(self, mock_sahara_consts,
                                  mock_generate_random_name):
        """A cluster reaching "error" status raises GetResourceErrorStatus."""
        scenario = utils.SaharaScenario(self.context)
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        mock_sahara_consts.NODE_PROCESSES = mock_processes
        mock_sahara_consts.REPLICATION_CONFIGS = mock_configs

        self.clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="error")

        self.assertRaises(exceptions.GetResourceErrorStatus,
                          scenario._launch_cluster,
                          plugin_name="test_plugin",
                          hadoop_version="test_version",
                          master_flavor_id="test_flavor_m",
                          worker_flavor_id="test_flavor_w",
                          image_id="test_image",
                          floating_ip_pool="test_pool",
                          volumes_per_node=5,
                          volumes_size=10,
                          workers_count=42,
                          node_configs={"HDFS": {"local_config":
                                                 "local_value"}})

    def test_scale_cluster(self):
        """Scaling by +1 resizes only the worker node group (41 -> 42)."""
        scenario = utils.SaharaScenario(self.context)
        cluster = mock.MagicMock(id=42, node_groups=[{
            "name": "random_master",
            "count": 1
        }, {
            "name": "random_worker",
            "count": 41
        }])
        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            id=42,
            status="active")

        expected_scale_object = {
            "resize_node_groups": [{
                "name": "random_worker",
                "count": 42
            }]
        }

        scenario._scale_cluster(cluster, 1)
        self.clients("sahara").clusters.scale.assert_called_once_with(
            42, expected_scale_object)

    def test_delete_cluster(self):
        """_delete_cluster deletes and polls until get() raises."""
        scenario = utils.SaharaScenario(self.context)
        cluster = mock.MagicMock(id=42)
        # First get() still finds the cluster; the second raises, which the
        # util treats as "deleted".
        self.clients("sahara").clusters.get.side_effect = [
            cluster, sahara_base.APIException()
        ]

        scenario._delete_cluster(cluster)
        delete_mock = self.clients("sahara").clusters.delete
        delete_mock.assert_called_once_with(42)

        cl_get_expected = mock.call(42)
        self.clients("sahara").clusters.get.assert_has_calls([cl_get_expected,
                                                              cl_get_expected])

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.delete_cluster")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
                return_value="42")
    def test_create_output_ds(self, mock_generate_random_name):
        """HDFS output data sources are created under the configured prefix."""
        self.context.update({
            "sahara": {
                "output_conf": {
                    "output_type": "hdfs",
                    "output_url_prefix": "hdfs://test_out/"
                }
            }
        })

        scenario = utils.SaharaScenario(self.context)
        scenario._create_output_ds()

        self.clients("sahara").data_sources.create.assert_called_once_with(
            name="42",
            description="",
            data_source_type="hdfs",
            url="hdfs://test_out/42"
        )

    @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
                return_value="42")
    def test_create_output_ds_swift(self, mock_generate_random_name):
        """Swift output is rejected with a RallyException."""
        self.context.update({
            "sahara": {
                "output_conf": {
                    "output_type": "swift",
                    "output_url_prefix": "swift://test_out/"
                }
            }
        })

        scenario = utils.SaharaScenario(self.context)
        self.assertRaises(exceptions.RallyException,
                          scenario._create_output_ds)

    def test_run_job_execution(self):
        """A job execution is polled until it reaches SUCCESS."""
        self.clients("sahara").job_executions.get.side_effect = [
            mock.MagicMock(info={"status": "pending"}, id="42"),
            mock.MagicMock(info={"status": "SUCCESS"}, id="42")]

        self.clients("sahara").job_executions.create.return_value = (
            mock.MagicMock(id="42"))

        scenario = utils.SaharaScenario(self.context)
        scenario._run_job_execution(job_id="test_job_id",
                                    cluster_id="test_cluster_id",
                                    input_id="test_input_id",
                                    output_id="test_output_id",
                                    configs={"k": "v"},
                                    job_idx=0)

        self.clients("sahara").job_executions.create.assert_called_once_with(
            job_id="test_job_id",
            cluster_id="test_cluster_id",
            input_id="test_input_id",
            output_id="test_output_id",
            configs={"k": "v"}
        )

        je_get_expected = mock.call("42")
        self.clients("sahara").job_executions.get.assert_has_calls(
            [je_get_expected, je_get_expected]
        )

    def test_run_job_execution_fail(self):
        """A job execution ending in "killed" raises a RallyException."""
        self.clients("sahara").job_executions.get.side_effect = [
            mock.MagicMock(info={"status": "pending"}, id="42"),
            mock.MagicMock(info={"status": "killed"}, id="42")]

        self.clients("sahara").job_executions.create.return_value = (
            mock.MagicMock(id="42"))

        scenario = utils.SaharaScenario(self.context)
        self.assertRaises(exceptions.RallyException,
                          scenario._run_job_execution,
                          job_id="test_job_id",
                          cluster_id="test_cluster_id",
                          input_id="test_input_id",
                          output_id="test_output_id",
                          configs={"k": "v"},
                          job_idx=0)

        self.clients("sahara").job_executions.create.assert_called_once_with(
            job_id="test_job_id",
            cluster_id="test_cluster_id",
            input_id="test_input_id",
            output_id="test_output_id",
            configs={"k": "v"}
        )
| |
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import unittest
from adapt.parser import Parser
from adapt.entity_tagger import EntityTagger
from adapt.intent import IntentBuilder, resolve_one_of, choose_1_from_each
from adapt.tools.text.tokenizer import EnglishTokenizer
from adapt.tools.text.trie import Trie
__author__ = 'seanfitz'
class IntentTest(unittest.TestCase):
    """Tests for IntentBuilder-built intents validated against Parser output."""

    def setUp(self):
        # Build a small vocabulary trie + tagger + parser shared by the tests.
        self.trie = Trie()
        self.tokenizer = EnglishTokenizer()
        self.regex_entities = []
        self.tagger = EntityTagger(self.trie, self.tokenizer,
                                   regex_entities=self.regex_entities)
        self.trie.insert("play", ("play", "PlayVerb"))
        self.trie.insert("stop", ("stop", "StopVerb"))
        self.trie.insert("the big bang theory",
                         ("the big bang theory", "Television Show"))
        # Deliberate overlapping entry to exercise ambiguous tagging.
        self.trie.insert("the big", ("the big", "Not a Thing"))
        self.trie.insert("barenaked ladies",
                         ("barenaked ladies", "Radio Station"))
        self.trie.insert("show", ("show", "Command"))
        self.trie.insert("what", ("what", "Question"))
        self.parser = Parser(self.tokenizer, self.tagger)

    def tearDown(self):
        pass

    def test_basic_intent(self):
        """Two require() clauses both resolve from a single utterance."""
        intent = IntentBuilder("play television intent") \
            .require("PlayVerb") \
            .require("Television Show") \
            .build()
        for result in self.parser.parse("play the big bang theory"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('PlayVerb') == 'play'
            assert result_intent.get('Television Show') == "the big bang theory"

    def test_at_least_one(self):
        """one_of() matches whichever of its alternatives is present."""
        intent = IntentBuilder("play intent") \
            .require("PlayVerb") \
            .one_of("Television Show", "Radio Station") \
            .build()
        for result in self.parser.parse("play the big bang theory"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('PlayVerb') == 'play'
            assert result_intent.get('Television Show') == "the big bang theory"

        for result in self.parser.parse("play the barenaked ladies"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('PlayVerb') == 'play'
            assert result_intent.get('Radio Station') == "barenaked ladies"

    def test_at_least_one_with_tag_in_multiple_slots(self):
        """A tag appearing in several one_of() slots fills all of them."""
        self.trie.insert("temperature", ("temperature", "temperature"))
        self.trie.insert("living room", ("living room", "living room"))
        self.trie.insert("what is", ("what is", "what is"))
        intent = IntentBuilder("test intent") \
            .one_of("what is") \
            .one_of("temperature", "living room") \
            .one_of("temperature") \
            .build()
        for result in self.parser.parse(
                "what is the temperature in the living room"):
            result_intent = intent.validate(result.get("tags"),
                                            result.get("confidence"))
            assert result_intent.get("confidence") > 0.0
            assert result_intent.get("temperature") == "temperature"
            assert result_intent.get("living room") == "living room"
            assert result_intent.get("what is") == "what is"

    def test_at_least_on_no_required(self):
        # NOTE: "on" in the name is a typo for "one"; kept to preserve the
        # test id. An intent with only one_of() clauses and no require().
        intent = IntentBuilder("play intent") \
            .one_of("Television Show", "Radio Station") \
            .build()
        for result in self.parser.parse("play the big bang theory"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('Television Show') == "the big bang theory"

        for result in self.parser.parse("play the barenaked ladies"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('Radio Station') == "barenaked ladies"

    def test_at_least_one_alone(self):
        """A one_of() clause can be satisfied by a single-word utterance."""
        intent = IntentBuilder("OptionsForLunch") \
            .one_of("Question", "Command") \
            .build()

        for result in self.parser.parse("show"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('Command') == "show"

    def test_basic_intent_with_alternate_names(self):
        """require(entity, alias) exposes the match under the alias name."""
        intent = IntentBuilder("play television intent") \
            .require("PlayVerb", "Play Verb") \
            .require("Television Show", "series") \
            .build()
        for result in self.parser.parse("play the big bang theory"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('Play Verb') == 'play'
            assert result_intent.get('series') == "the big bang theory"

    def test_intent_with_regex_entity(self):
        """A regex-captured group ("Event") can be required like an entity."""
        # Fresh trie/tagger/parser so only this test's vocabulary applies.
        self.trie = Trie()
        self.tagger = EntityTagger(self.trie, self.tokenizer,
                                   self.regex_entities)
        self.parser = Parser(self.tokenizer, self.tagger)
        self.trie.insert("theory", ("theory", "Concept"))
        regex = re.compile(r"the (?P<Event>.*)")
        self.regex_entities.append(regex)
        intent = IntentBuilder("mock intent") \
            .require("Event") \
            .require("Concept").build()

        for result in self.parser.parse("the big bang theory"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('Event') == 'big bang'
            assert result_intent.get('Concept') == "theory"

    def test_intent_using_alias(self):
        """An alias trie entry resolves to the canonical entity value."""
        self.trie.insert("big bang", ("the big bang theory", "Television Show"))
        intent = IntentBuilder("play television intent") \
            .require("PlayVerb", "Play Verb") \
            .require("Television Show", "series") \
            .build()
        for result in self.parser.parse("play the big bang theory"):
            result_intent = intent.validate(result.get('tags'),
                                            result.get('confidence'))
            assert result_intent.get('confidence') > 0.0
            assert result_intent.get('Play Verb') == 'play'
            assert result_intent.get('series') == "the big bang theory"

    def test_resolve_one_of(self):
        """resolve_one_of() maps each one_of group to its matching tags."""
        # Three parser tags: "what is", "temperature", "living room".
        tags = [
            {
                "confidence": 1.0,
                "end_token": 1,
                "entities": [
                    {
                        "confidence": 1.0,
                        "data": [
                            [
                                "what is",
                                "skill_iot_controlINFORMATION_QUERY"
                            ]
                        ],
                        "key": "what is",
                        "match": "what is"
                    }
                ],
                "from_context": False,
                "key": "what is",
                "match": "what is",
                "start_token": 0
            },
            {
                "end_token": 3,
                "entities": [
                    {
                        "confidence": 1.0,
                        "data": [
                            [
                                "temperature",
                                "skill_weatherTemperature"
                            ],
                            [
                                "temperature",
                                "skill_iot_controlTEMPERATURE"
                            ]
                        ],
                        "key": "temperature",
                        "match": "temperature"
                    }
                ],
                "from_context": False,
                "key": "temperature",
                "match": "temperature",
                "start_token": 3
            },
            {
                "confidence": 1.0,
                "end_token": 7,
                "entities": [
                    {
                        "confidence": 1.0,
                        "data": [
                            [
                                "living room",
                                "skill_iot_controlENTITY"
                            ]
                        ],
                        "key": "living room",
                        "match": "living room"
                    }
                ],
                "from_context": False,
                "key": "living room",
                "match": "living room",
                "start_token": 6
            }
        ]
        # Each inner list is one one_of group of acceptable entity types.
        at_least_one = [
            [
                "skill_iot_controlINFORMATION_QUERY"
            ],
            [
                "skill_iot_controlTEMPERATURE",
                "skill_iot_controlENTITY"
            ],
            [
                "skill_iot_controlTEMPERATURE"
            ]
        ]
        # Expected: entity type -> list of tags resolved for that type.
        result = {
            "skill_iot_controlENTITY": [
                {
                    "confidence": 1.0,
                    "end_token": 7,
                    "entities": [
                        {
                            "confidence": 1.0,
                            "data": [
                                [
                                    "living room",
                                    "skill_iot_controlENTITY"
                                ]
                            ],
                            "key": "living room",
                            "match": "living room"
                        }
                    ],
                    "from_context": False,
                    "key": "living room",
                    "match": "living room",
                    "start_token": 6
                }
            ],
            "skill_iot_controlINFORMATION_QUERY": [
                {
                    "confidence": 1.0,
                    "end_token": 1,
                    "entities": [
                        {
                            "confidence": 1.0,
                            "data": [
                                [
                                    "what is",
                                    "skill_iot_controlINFORMATION_QUERY"
                                ]
                            ],
                            "key": "what is",
                            "match": "what is"
                        }
                    ],
                    "from_context": False,
                    "key": "what is",
                    "match": "what is",
                    "start_token": 0
                }
            ],
            "skill_iot_controlTEMPERATURE": [
                {
                    "end_token": 3,
                    "entities": [
                        {
                            "confidence": 1.0,
                            "data": [
                                [
                                    "temperature",
                                    "skill_weatherTemperature"
                                ],
                                [
                                    "temperature",
                                    "skill_iot_controlTEMPERATURE"
                                ]
                            ],
                            "key": "temperature",
                            "match": "temperature"
                        }
                    ],
                    "from_context": False,
                    "key": "temperature",
                    "match": "temperature",
                    "start_token": 3
                }
            ]
        }
        assert resolve_one_of(tags, at_least_one) == result
# noinspection PyPep8Naming
def TestTag(tag_name,
            tag_value,
            tag_confidence=1.0,
            entity_confidence=1.0,
            match=None):
    """
    Create a dict in the shape of a tag as yielded from parser.

    :param tag_name: tag name (equivalent to a label)
    :param tag_value: tag value (value being labeled)
    :param tag_confidence: confidence of parse of the tag, influenced by
        fuzzy matching or context
    :param entity_confidence: weight of the entity, influenced by
        context
    :param match: the text matched by the parser, which may not match tag_value
        in the case of an alias or fuzzy matching. Defaults to tag_value.

    Uses "from_context" attribute to force token positioning to be ignored.

    :return: a dict that matches the shape of a parser tag
    """
    return {
        "confidence": tag_confidence,
        "entities": [
            {
                "confidence": entity_confidence,
                "data": [
                    [
                        tag_value,
                        tag_name
                    ]
                ],
                "key": tag_value,
                "match": match or tag_value
            }
        ],
        # NOTE: the original literal listed "from_context" twice (False, then
        # True); Python silently keeps only the last value, so True is what
        # callers always observed -- consistent with the docstring's "force
        # token positioning to be ignored". The dead duplicate is removed.
        "from_context": True,
        "key": tag_value,
        "match": match or tag_value,
        # Sentinel positions: real parser tags carry token offsets, but
        # from_context tags use -1 since position is ignored.
        "start_token": -1,
        "end_token": -1
    }
class IntentUtilityFunctionsTest(unittest.TestCase):
    """Unit tests for the choose_1_from_each combination generator."""

    def test_choose_1_from_each_empty(self):
        # An empty inner list means there is nothing to choose from,
        # so no combinations are produced at all.
        self.assertListEqual([], list(choose_1_from_each([[]])))

    def test_choose_1_from_each_basic(self):
        # Two lists of two: expect the full 2x2 cartesian product, in
        # left-to-right, first-list-major order.
        combos = list(choose_1_from_each([['A', 'B'], ['C', 'D']]))
        self.assertListEqual(
            [['A', 'C'],
             ['A', 'D'],
             ['B', 'C'],
             ['B', 'D']],
            combos)

    def test_choose_1_from_each_varying_sizes(self):
        # Lists of sizes 1, 2 and 3: expect 1 * 2 * 3 = 6 combinations.
        source_lists = [
            ['A'],
            ['B', 'C'],
            ['D', 'E', 'F']
        ]
        combos = list(choose_1_from_each(source_lists))
        self.assertListEqual(
            [['A', 'B', 'D'],
             ['A', 'B', 'E'],
             ['A', 'B', 'F'],
             ['A', 'C', 'D'],
             ['A', 'C', 'E'],
             ['A', 'C', 'F']],
            combos)
class IntentScoringTest(unittest.TestCase):
    """Confidence scoring for require / one_of / optional intent parsers."""

    def setUp(self):
        self.require_intent = (IntentBuilder('require_intent')
                               .require('required')
                               .build())
        self.one_of_intent = (IntentBuilder('one_of_intent')
                              .one_of('one_of_1', 'one_of_2')
                              .build())
        self.optional_intent = (IntentBuilder('optional_intent')
                                .optionally('optional')
                                .build())
        self.all_features_intent = (IntentBuilder('test_intent')
                                    .require('required')
                                    .one_of('one_of_1', 'one_of_2')
                                    .optionally('optional')
                                    .build())

    def _assert_score(self, parser, tag, parse_confidence, expected_score):
        # Validate a single tag and check the resulting intent confidence
        # and that the tag was consumed.
        intent, consumed = parser.validate_with_tags(
            [tag], confidence=parse_confidence)
        self.assertEqual(expected_score, intent.get('confidence'))
        self.assertListEqual([tag], consumed)

    def test_basic_scoring_default_weights(self):
        # Full-confidence tags through a full-confidence parse score 1.0
        # for every requirement kind.
        self._assert_score(self.require_intent,
                           TestTag('required', 'foo'), 1.0, 1.0)
        self._assert_score(self.one_of_intent,
                           TestTag('one_of_1', 'bar'), 1.0, 1.0)
        self._assert_score(self.optional_intent,
                           TestTag('optional', 'bing'), 1.0, 1.0)

    def test_weighted_scoring_from_regex_entities(self):
        # Halving the entity weight halves the intent confidence.
        self._assert_score(
            self.require_intent,
            TestTag('required', 'foo', entity_confidence=0.5), 1.0, 0.5)
        self._assert_score(
            self.one_of_intent,
            TestTag('one_of_1', 'bar', entity_confidence=0.5), 1.0, 0.5)
        self._assert_score(
            self.optional_intent,
            TestTag('optional', 'bing', entity_confidence=0.5), 1.0, 0.5)

    def test_weighted_scoring_from_fuzzy_matching(self):
        # Halving the parse confidence halves the intent confidence.
        self._assert_score(self.require_intent,
                           TestTag('required', 'foo'), 0.5, 0.5)
        self._assert_score(self.one_of_intent,
                           TestTag('one_of_1', 'bar'), 0.5, 0.5)
        self._assert_score(self.optional_intent,
                           TestTag('optional', 'bing'), 0.5, 0.5)
| |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from collections import Mapping, Set
from decimal import Decimal, Context, Clamped
from decimal import Overflow, Inexact, Underflow, Rounded
from botocore.compat import six
# Single-character DynamoDB wire-format type codes.
STRING = 'S'
NUMBER = 'N'
BINARY = 'B'
STRING_SET = 'SS'
NUMBER_SET = 'NS'
BINARY_SET = 'BS'
NULL = 'NULL'
BOOLEAN = 'BOOL'
MAP = 'M'
LIST = 'L'
# Decimal context matching DynamoDB's number limits (38 digits of
# precision); the traps make any value that cannot round-trip exactly
# raise instead of being silently rounded/clamped.
DYNAMODB_CONTEXT = Context(
    Emin=-128, Emax=126, prec=38,
    traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
# Types accepted as raw binary values (py2 str / py3 bytes via six).
BINARY_TYPES = (bytearray, six.binary_type)
class Binary(object):
    """Wrapper marking a value as DynamoDB Binary data.

    Primarily useful on Python 2 to distinguish binary payloads from
    text; unicode and Python 3 ``str`` values are rejected.
    """
    def __init__(self, value):
        # Only raw byte types (see BINARY_TYPES) may be wrapped.
        if not isinstance(value, BINARY_TYPES):
            acceptable = ', '.join([str(t) for t in BINARY_TYPES])
            raise TypeError('Value must be of the following types: %s.' %
                            acceptable)
        self.value = value

    def __eq__(self, other):
        # Compare against another Binary's wrapped bytes, or directly
        # against a raw value.
        if isinstance(other, Binary):
            other = other.value
        return self.value == other

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return 'Binary(%r)' % self.value

    def __str__(self):
        # NOTE(review): returns the raw wrapped bytes.  Under Python 3,
        # str() on a Binary will therefore raise TypeError -- presumably
        # only exercised on Python 2; confirm before relying on it.
        return self.value

    def __hash__(self):
        return hash(self.value)
class TypeSerializer(object):
"""This class serializes Python data types to DynamoDB types."""
def serialize(self, value):
"""The method to serialize the Python data types.
:param value: A python value to be serialized to DynamoDB. Here are
the various conversions:
Python DynamoDB
------ --------
None {'NULL': True}
True/False {'BOOL': True/False}
int/Decimal {'N': str(value)}
string {'S': string}
Binary/bytearray/bytes (py3 only) {'B': bytes}
set([int/Decimal]) {'NS': [str(value)]}
set([string]) {'SS': [string])
set([Binary/bytearray/bytes]) {'BS': [bytes]}
list {'L': list}
dict {'M': dict}
For types that involve numbers, it is recommended that ``Decimal``
objects are used to be able to round-trip the Python type.
For types that involve binary, it is recommended that ``Binary``
objects are used to be able to round-trip the Python type.
:rtype: dict
:returns: A dictionary that represents a dynamoDB data type. These
dictionaries can be directly passed to botocore methods.
"""
dynamodb_type = self._get_dynamodb_type(value)
serializer = getattr(self, '_serialize_%s' % dynamodb_type.lower())
return {dynamodb_type: serializer(value)}
def _get_dynamodb_type(self, value):
dynamodb_type = None
if self._is_null(value):
dynamodb_type = NULL
elif self._is_boolean(value):
dynamodb_type = BOOLEAN
elif self._is_number(value):
dynamodb_type = NUMBER
elif self._is_string(value):
dynamodb_type = STRING
elif self._is_binary(value):
dynamodb_type = BINARY
elif self._is_type_set(value, self._is_number):
dynamodb_type = NUMBER_SET
elif self._is_type_set(value, self._is_string):
dynamodb_type = STRING_SET
elif self._is_type_set(value, self._is_binary):
dynamodb_type = BINARY_SET
elif self._is_map(value):
dynamodb_type = MAP
elif self._is_list(value):
dynamodb_type = LIST
else:
msg = 'Unsupported type "%s" for value "%s"' % (type(value), value)
raise TypeError(msg)
return dynamodb_type
def _is_null(self, value):
if value is None:
return True
return False
def _is_boolean(self, value):
if isinstance(value, bool):
return True
return False
def _is_number(self, value):
if isinstance(value, (six.integer_types, Decimal)):
return True
elif isinstance(value, float):
raise TypeError(
'Float types are not supported. Use Decimal types instead.')
return False
def _is_string(self, value):
if isinstance(value, six.string_types):
return True
return False
def _is_binary(self, value):
if isinstance(value, Binary):
return True
elif isinstance(value, bytearray):
return True
elif six.PY3 and isinstance(value, six.binary_type):
return True
return False
def _is_set(self, value):
if isinstance(value, Set):
return True
return False
def _is_type_set(self, value, type_validator):
if self._is_set(value):
if False not in map(type_validator, value):
return True
return False
def _is_map(self, value):
if isinstance(value, Mapping):
return True
return False
def _is_list(self, value):
if isinstance(value, list):
return True
return False
def _serialize_null(self, value):
return True
def _serialize_bool(self, value):
return value
def _serialize_n(self, value):
number = str(DYNAMODB_CONTEXT.create_decimal(value))
if number in ['Infinity', 'NaN']:
raise TypeError('Infinity and NaN not supported')
return number
def _serialize_s(self, value):
return value
def _serialize_b(self, value):
if isinstance(value, Binary):
value = value.value
return value
def _serialize_ss(self, value):
return [self._serialize_s(s) for s in value]
def _serialize_ns(self, value):
return [self._serialize_n(n) for n in value]
def _serialize_bs(self, value):
return [self._serialize_b(b) for b in value]
def _serialize_l(self, value):
return [self.serialize(v) for v in value]
def _serialize_m(self, value):
return dict([(k, self.serialize(v)) for k, v in value.items()])
class TypeDeserializer(object):
    """This class deserializes DynamoDB types to Python types."""
    def deserialize(self, value):
        """The method to deserialize the DynamoDB data types.

        :param value: A DynamoDB value to be deserialized to a pythonic value.
            Here are the various conversions:

            DynamoDB                                Python
            --------                                ------
            {'NULL': True}                          None
            {'BOOL': True/False}                    True/False
            {'N': str(value)}                       Decimal(str(value))
            {'S': string}                           string
            {'B': bytes}                            Binary(bytes)
            {'NS': [str(value)]}                    set([Decimal(str(value))])
            {'SS': [string]}                        set([string])
            {'BS': [bytes]}                         set([bytes])
            {'L': list}                             list
            {'M': dict}                             dict

        :returns: The pythonic value of the DynamoDB type.
        """
        if not value:
            raise TypeError('Value must be a nonempty dictionary whose key '
                            'is a valid dynamodb type.')
        # The single key of the wrapper dict names the wire type.
        dynamodb_type = next(iter(value))
        handler_name = '_deserialize_%s' % dynamodb_type.lower()
        try:
            handler = getattr(self, handler_name)
        except AttributeError:
            raise TypeError(
                'Dynamodb type %s is not supported' % dynamodb_type)
        return handler(value[dynamodb_type])

    def _deserialize_null(self, value):
        return None

    def _deserialize_bool(self, value):
        return value

    def _deserialize_n(self, value):
        # Parse through the DynamoDB-compatible Decimal context.
        return DYNAMODB_CONTEXT.create_decimal(value)

    def _deserialize_s(self, value):
        return value

    def _deserialize_b(self, value):
        return Binary(value)

    def _deserialize_ns(self, value):
        return set(self._deserialize_n(v) for v in value)

    def _deserialize_ss(self, value):
        return set(self._deserialize_s(v) for v in value)

    def _deserialize_bs(self, value):
        return set(self._deserialize_b(v) for v in value)

    def _deserialize_l(self, value):
        # Lists are heterogeneous: each element deserialized recursively.
        return [self.deserialize(item) for item in value]

    def _deserialize_m(self, value):
        return {k: self.deserialize(v) for k, v in value.items()}
| |
import os
import utils
from SCons.Environment import Environment
from SCons.Script import Exit
from SCons.Script import Action
from SCons.Script import Split
def checkPython(context):
    # Stub SCons configure check: always reports failure; python
    # embedding is not supported for this (static OSX) build.
    context.Message("Check for python..")
    context.Result(0)
    return False
def checkStaticSDL(context):
    # SCons configure check: pull static SDL link flags and cflags into
    # the environment via sdl-config.  Returns 1 on success, 0 on failure
    # (custom-check convention).
    context.Message("Checking for static SDL... ")
    env = context.env
    try:
        utils.safeParseConfig(env, 'sdl-config --static-libs --cflags')
        env.Append(CPPDEFINES = ['USE_SDL'])
        # FIXME: move sdl main to a new check
        env.Append(CPPDEFINES = ['USE_SDL_MAIN'])
    except Exception:
        # sdl-config missing or failed.  NOTE(review): unlike the other
        # checks, env is not rolled back here, so it may already have
        # been partially modified -- confirm this is intended.
        context.Result(utils.colorResult(0))
        return 0
    context.Result(utils.colorResult(1))
    return 1
def checkStaticOgg(context):
    # SCons configure check: locate static libogg/libvorbis via
    # pkg-config and verify by compiling/linking a tiny vorbisfile
    # program.  On failure the saved environment clone is restored.
    context.Message("Checking for static ogg and vorbis... ")
    tmp = context.env.Clone()
    env = context.env
    # NOTE(review): HAVE_OGG is set optimistically *before* the check
    # runs; it only survives because env is restored (sconf.env = tmp)
    # on failure -- confirm the restore covers both flags.
    env['HAVE_OGG'] = True
    env.Append(CPPDEFINES = ['HAVE_OGG'])
    (ok, stuff) = context.TryAction(Action("pkg-config --version"))
    if ok:
        try:
            utils.safeParseConfig(env, 'pkg-config vorbisfile --cflags')
            # Strip off the -L part
            libdir = utils.readExec('pkg-config vorbisfile --libs-only-L')[2:].rstrip()
            # Hack to hardcode these libraries
            vorbisfile = env.Install('misc', "%s/libvorbisfile.a" % libdir)
            ogg = env.Install('misc', "%s/libogg.a" % libdir)
            vorbis = env.Install('misc', "%s/libvorbis.a" % libdir)
            env.Append(LIBS = [vorbisfile, ogg, vorbis])
        except OSError:
            context.sconf.env = tmp
            context.Result(utils.colorResult(0))
            return 0
    # Use SDL_main as the entry point when the SDL check ran earlier
    # and requested it.
    main = 'int main(int argc, char ** argv)'
    try:
        if env['HAVE_SDL_MAIN']:
            main = 'int SDL_main(int argc, char ** argv)'
    except KeyError:
        pass
    ret = context.TryLink("""
    #include <vorbis/vorbisfile.h>
    #include <stdio.h>
    %(main)s {
      OggVorbis_File ovf;
      FILE * f;
      ov_open_callbacks(f, &ovf, 0, 0, OV_CALLBACKS_DEFAULT);
      return 0;
    }
    """ % {'main' : main}, ".c")
    if not ret:
        # Link failed: roll back all environment changes.
        context.sconf.env = tmp
    context.Result(utils.colorResult(ret))
    return ret
def checkStaticZ(context):
    # SCons configure check: locate static zlib via pkg-config; rolls
    # back the environment clone on failure.
    context.Message("Checking for static z... ")
    tmp = context.env.Clone()
    env = context.env
    (ok, stuff) = context.TryAction(Action("pkg-config --version"))
    if ok:
        try:
            utils.safeParseConfig(env, 'pkg-config zlib --cflags')
            # Strip off the -L part
            libdir = utils.readExec('pkg-config zlib --libs-only-L')[2:].rstrip()
            # Hack to hardcode these libraries
            zlib = env.Install('misc', "%s/libz.a" % libdir)
            env.Append(LIBS = [zlib])
        except OSError:
            context.sconf.env = tmp
            context.Result(utils.colorResult(0))
            return 0
    # FIXME: write proper test
    ret = context.TryLink("""
    int main(int argc, char ** argv){
        return 0;
    }
    """, ".c")
    if not ret:
        context.sconf.env = tmp
    context.Result(utils.colorResult(ret))
    return ret
def findLibDir(output):
    """Extract the directory from the first -L<dir> flag in a linker
    flag string (e.g. the output of `freetype-config --libs`).

    Raises Exception if no -L flag is present.
    """
    import re
    find = re.compile('-L([^\\s]*)')
    # search() rather than match(): match() only anchors at the start of
    # the string, so a -L flag that is not the very first token (common
    # in config-script output like "-lfreetype -L/opt/lib") was missed.
    match = find.search(output)
    if match:
        return match.group(1)
    raise Exception("Could not find lib dir!")
def checkStaticFreetype(context):
    # SCons configure check: locate static freetype via freetype-config;
    # rolls back the environment clone on any failure.  (Python 2 file:
    # note the `except Exception, e` syntax below.)
    context.Message("Checking for static freetype... ")
    tmp = context.env.Clone()
    env = context.env
    (ok, stuff) = context.TryAction(Action("pkg-config --version"))
    if ok:
        try:
            utils.safeParseConfig(env, 'freetype-config --cflags')
            # Strip off the -L part
            libdir = findLibDir(utils.readExec('freetype-config --libs'))
            # Hack to hardcode these libraries
            freetype = env.Install('misc', '%s/libfreetype.a' % libdir)
            env.Append(LIBS = [freetype])
        except OSError:
            context.sconf.env = tmp
            context.Result(utils.colorResult(0))
            return 0
        except Exception, e:
            # findLibDir raises a plain Exception when no -L flag is
            # present; report and fail the check.
            print e
            context.sconf.env = tmp
            context.Result(utils.colorResult(0))
            return 0
    # FIXME: write proper test
    ret = context.TryLink("""
    #include <stdio.h>
    int main(int argc, char ** argv){
        return 0;
    }
    """, ".c")
    if not ret:
        context.sconf.env = tmp
    context.Result(utils.colorResult(ret))
    return ret
def checkStaticPng(context):
    # SCons configure check: locate static libpng via libpng-config;
    # rolls back the environment clone on any failure.
    context.Message("Checking for static png... ")
    tmp = context.env.Clone()
    env = context.env
    try:
        utils.safeParseConfig(env, 'libpng-config --cflags')
        libdir = utils.readExec('libpng-config --libdir')
        # Hack to hardcode these libraries
        png = env.Install('misc', '%s/libpng.a' % libdir)
        env.Append(LIBS = [png])
    except OSError:
        context.sconf.env = tmp
        context.Result(utils.colorResult(0))
        return 0
    except Exception, e:
        # Any other failure: report and fail the check.
        print e
        context.sconf.env = tmp
        context.Result(utils.colorResult(0))
        return 0
    # FIXME: write proper test
    ret = context.TryLink("""
    #include <stdio.h>
    int main(int argc, char ** argv){
        return 0;
    }
    """, ".c")
    if not ret:
        context.sconf.env = tmp
    context.Result(utils.colorResult(ret))
    return ret
def checkMallocH(context):
    # SCons configure check for <malloc.h>, additionally searching
    # /usr/include/sys (its location on OSX).  NOTE(review): unlike the
    # other checks this one prints no Message banner of its own --
    # CheckCHeader presumably prints one; confirm the output looks right.
    tmp = context.env.Clone()
    env = context.env
    env.Append(CPPPATH = ['/usr/include/sys'])
    ok = context.sconf.CheckCHeader('malloc.h')
    if not ok:
        # Roll back the CPPPATH addition on failure.
        context.sconf.env = tmp
    context.Result(ok)
    return ok
def checkStaticMpg123(context):
    # SCons configure check: locate static libmpg123 via pkg-config and
    # verify by linking a tiny program that calls mpg123_init().
    context.Message("Checking for static libmpg123... ")
    tmp = context.env.Clone()
    env = context.env
    # NOTE(review): HAVE_MP3_MPG123 is set optimistically before the
    # check runs; it is only undone via the sconf.env = tmp restore on
    # failure -- confirm the restore covers both flags.
    env['HAVE_MP3_MPG123'] = True
    env.Append(CPPDEFINES = ['HAVE_MP3_MPG123'])
    (ok, stuff) = context.TryAction(Action("pkg-config --version"))
    if ok:
        try:
            utils.safeParseConfig(env,'pkg-config libmpg123 --cflags')
            # Strip off the -L part
            libdir = utils.readExec('pkg-config libmpg123 --libs-only-L')[2:].rstrip()
            # Hack to hardcode these libraries
            mpg123 = env.Install('misc', "%s/libmpg123.a" % libdir)
            env.Append(LIBS = [mpg123])
        except OSError:
            context.sconf.env = tmp
            context.Result(utils.colorResult(0))
            return 0
    ret = context.TryLink("""
    #include <mpg123.h>
    int main(int argc, char ** argv){
        int err = mpg123_init();
        if (err == MPG123_OK){
            return 0;
        }
        return 1;
    }
    """, ".c")
    if not ret:
        context.sconf.env = tmp
    context.Result(utils.colorResult(ret))
    return ret
def checkStaticAllegro4(context):
    # Stub configure check: static Allegro4 is not supported here.
    context.Message("Check for static Allegro4..")
    context.Result(0)
    return False
def checkStaticAllegro5(context):
    # Stub configure check: static Allegro5 is not supported here.
    context.Message("Check for static Allegro5..")
    context.Result(0)
    return False
def getStaticEnvironment():
    """Build the SCons Environment for a static OSX universal
    (i386 + x86_64) build: sets platform flags, registers the custom
    configure checks, runs them, and hands the configured environment
    to utils.configure_backend.
    """
    environment = Environment(ENV = os.environ)
    peg_color = 'light-cyan'
    environment['PAINTOWN_PLATFORM'] = ['osx']
    environment['PAINTOWN_USE_PRX'] = False
    environment['PAINTOWN_TESTS'] = {'CheckPython': checkPython}
    environment['PAINTOWN_COLORIZE'] = utils.colorize
    # Networking is always enabled on OSX.  (Fix: the original assigned
    # this flag twice in a row; the redundant duplicate was removed.)
    environment['PAINTOWN_NETWORKING'] = True
    environment.Append(CPPDEFINES = ['HAVE_NETWORKING'])
    environment.Append(CPPDEFINES = ['MACOSX'])
    environment['LIBS'] = []
    environment['PEG_MAKE'] = "%s %s" % (utils.colorize('Creating peg parser', peg_color), utils.colorize('$TARGET', 'light-blue'))
    environment.Append(BUILDERS = {'Peg' : utils.pegBuilder(environment)})
    environment.Append(CPPPATH = ['#src', '#src/util/network/hawknl'])
    # environment.Append(CCFLAGS = Split("-arch i386 -arch x86_64"))
    # print environment['CCCOM']
    # I don't know why appending -arch to ccflags doesn't work, but whatever;
    # override the compile/link command templates to force universal binaries.
    environment['CCCOM'] = '$CC -arch i386 -arch x86_64 $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES -c -o $TARGET'
    environment['CXXCOM'] = '$CXX -arch i386 -arch x86_64 -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
    environment['LINKCOM'] = '$CXX $LINKFLAGS -arch i386 -arch x86_64 $SOURCES $_FRAMEWORKS -Wl,-all_load $ARCHIVES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET'
    # Default preference for a graphics renderer / input system
    backends = ['SDL', 'Allegro4', 'Allegro5']
    if utils.useAllegro4():
        backends = ['Allegro4', 'SDL', 'Allegro5']
    if utils.useAllegro5():
        backends = ['Allegro5', 'SDL', 'Allegro4']
    #environment.ParseConfig('freetype-config --libs --cflags')
    #environment.ParseConfig('libpng-config --libs --cflags')
    if utils.useLLVM():
        llvm(environment)
    # Register the custom checks; CheckSDL/CheckAllegro* are invoked
    # later by configure_backend rather than here.
    custom_tests = {"CheckCompiler": utils.checkCompiler,
                    "CheckSDL" : checkStaticSDL,
                    "CheckOgg" : checkStaticOgg,
                    "CheckFreetype" : checkStaticFreetype,
                    "CheckMalloc" : checkMallocH,
                    "CheckZ" : checkStaticZ,
                    "CheckPng" : checkStaticPng,
                    "CheckMpg123" : checkStaticMpg123,
                    "CheckAllegro4" : checkStaticAllegro4,
                    "CheckAllegro5" : checkStaticAllegro5}
    config = environment.Configure(custom_tests = custom_tests)
    config.CheckZ()
    config.CheckOgg()
    config.CheckMpg123()
    config.CheckFreetype()
    config.CheckPng()
    # config.CheckMalloc()
    environment = config.Finish()
    return utils.configure_backend(environment, backends, custom_tests)
| |
#!/usr/bin/env python
#
# * Copyright 2012-2014 by Aerospike.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a copy
# * of this software and associated documentation files (the "Software"), to
# * deal in the Software without restriction, including without limitation the
# * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# * sell copies of the Software, and to permit persons to whom the Software is
# * furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included in
# * all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# * IN THE SOFTWARE.
#
from __future__ import print_function
import aerospike
import sys
import random
AS_POLICY_W_EXISTS = "exists"
AS_POLICY_EXISTS_UNDEF = 0 # Use default value
AS_POLICY_EXISTS_IGNORE= 1 # Write the record, regardless of existence.
AS_POLICY_EXISTS_CREATE= 2 # Create a record, ONLY if it doesn't exist.
AS_POLICY_EXISTS_UPDATE= 3 # Update a record, ONLY if it exist (NOT YET IMPL).
class UserService(object):
    """Interactive console operations on Aerospike 'users' records.

    Python 2 example code: prompts via raw_input and performs
    put/get/batch-get against the supplied aerospike client.
    """
    #client
    def __init__(self, client):
        # client: a connected aerospike client; all methods use it.
        self.client = client
    def createUser(self):
        """Prompt for user details and write a new 'users' record."""
        print("\n********** Create User **********\n")
        # /*********************///
        # /*****Data Model*****///
        # Namespace: test
        # Set: users
        # Key: <username>
        # Bins:
        # username - String
        # password - String (For simplicity password is stored in plain-text)
        # gender - String (Valid values are 'm' or 'f')
        # region - String (Valid values are: 'n' (North), 's' (South), 'e' (East), 'w' (West) -- to keep data entry to minimal we just store the first letter)
        # lasttweeted - int (Stores epoch timestamp of the last/most recent tweet) -- Default to 0
        # tweetcount - int (Stores total number of tweets for the user) -- Default to 0
        # interests - Array of interests
        # Sample Key: dash
        # Sample Record:
        # { username: 'dash',
        # password: 'dash',
        # gender: 'm',
        # region: 'w',
        # lasttweeted: 1408574221,
        # tweetcount: 20,
        # interests: ['photography', 'technology', 'dancing', 'house music]
        # }
        # /*********************///
        username = str()
        password = str()
        gender = str()
        region = str()
        interests = str()
        # Get username
        username = raw_input("Enter username: ")
        record = { "username": username }
        if len(username) > 0:
            # Get password
            record['password'] = raw_input("Enter password for " + username + ":")
            # Get gender
            record['gender'] = raw_input("Select gender (f or m) for " + username + ":")
            # Get region
            record['region'] = raw_input("Select region (north, south, east or west) for " + username + ":")
            # Get interests
            record['interests'] = raw_input("Enter comma-separated interests for " + username + ":").split(',')
            # Write record
            # NOTE(review): unlike createUsers(), this record omits the
            # 'tweetcount'/'lasttweeted' bins the data model documents;
            # getUser() reads 'tweetcount' and would fail on such a
            # record -- confirm whether defaults should be written here.
            #wPolicy.recordExistsAction = RecordExistsAction.UPDATE
            meta = None
            policy = None
            self.client.put(("test", "users", username),record,meta,policy)
            print(record, "\nINFO: User record created!")
    def getUser(self):
        """Prompt for a username, read its record and print each bin."""
        userRecord = None
        userKey = None
        # Get username
        username = str()
        username = raw_input("Enter username: ")
        if len(username) > 0:
            # Check if username exists
            meta = None
            policy = None
            userKey = ("test", "users", username)
            (key, metadata,userRecord) = self.client.get(userKey,policy)
            if userRecord:
                print("\nINFO: User record read successfully! Here are the details:\n")
                print("username: " , userRecord["username"] , "\n")
                print("password: " , userRecord["password"] , "\n")
                print("gender: " , userRecord["gender"] , "\n")
                print("region: " , userRecord["region"] , "\n")
                print("tweetcount: ", userRecord["tweetcount"],"\n")
                print("interests: " , userRecord["interests"] , "\n")
            else:
                print("ERROR: User record not found!\n")
        else:
            print("ERROR: User record not found!\n")
    def updatePasswordUsingCAS(self):
        """Prompt for a username and overwrite its 'password' bin.

        Despite the name, no generation (CAS) check is actually applied;
        the generation-policy code is commented out below.
        """
        userRecord = None
        userKey = None
        passwordBin = None
        # Get username
        username = str()
        username = raw_input("Enter username: ")
        if len(username) > 0:
            # Check if username exists
            meta = None
            policy = None
            userKey = ("test", "users", username)
            (key, metadata,userRecord) = self.client.get(userKey,policy)
            if userRecord:
                record = {}
                # Get new password
                record["password"] = raw_input("Enter new password for " + username + ":")
                # record generation
                #writePolicy.generation = userRecord.generation
                #writePolicy.generationPolicy = GenerationPolicy.EXPECT_GEN_EQUAL
                self.client.put(userKey,record,meta,policy)
                print("\nINFO: The password has been set to: " , record["password"])
            else:
                print("ERROR: User record not found!")
        else:
            print("ERROR: User record not found!")
    def batchGetUserTweets(self):
        """Prompt for a username and print all of that user's tweets.

        Tweet keys follow the "<username>:<n>" convention, n counted
        from 1 up to the user's 'tweetcount' bin.
        """
        userRecord = None
        userKey = None
        # Get username
        username = str()
        username = raw_input("Enter username: ")
        if len(username) > 0:
            # Check if username exists
            meta = None
            policy = None
            userKey = ("test", "users", username)
            (key, metadata,userRecord) = self.client.get(userKey,policy)
            if userRecord:
                keys = {}
                i = 0
                while i < userRecord["tweetcount"]:
                    keys[i] = ("test", "tweets", (username + ":" + str((i + 1))))
                    i += 1
                print("\nHere's " + username + "'s tweet(s):\n")
                # Initiate batch read operation
                # NOTE(review): this issues one get() per key rather than
                # a true batch call -- presumably for simplicity.
                if len(keys) > 0:
                    for k in keys.values():
                        (key,meta,record) = self.client.get(k,policy)
                        print(record["tweet"] ,"\n")
            else:
                print("ERROR: User record not found!\n")
    def aggregateUsersByTweetCountByRegion(self):
        """Not implemented -- placeholder left by code generation."""
    def createUsers(self):
        """Bulk-create 99,999 sample users (user1..user100000) with
        randomized gender, region and interests; tweet counters default
        to 0 per the data model above.
        """
        genders = ["m", "f"]
        regions = ["n", "s", "e", "w"]
        randomInterests = ["Music", "Football", "Soccer", "Baseball", "Basketball", "Hockey", "Weekend Warrior", "Hiking", "Camping", "Travel", "Photography"]
        username = str()
        userInterests = None
        totalInterests = 0
        start = 1
        end = 100000
        totalUsers = end - start
        # Ignore existing records so the run is repeatable.
        wr_policy = {
            AS_POLICY_W_EXISTS: AS_POLICY_EXISTS_IGNORE
        }
        print("\nCreate " , totalUsers , " users. Press any key to continue...\n")
        raw_input("..")
        j = start
        while j <= end:
            username = "user" + str(j)
            meta = None
            key = ("test", "users", username)
            record = {}
            record["username"] = username
            record["password"] = 'pwd' + str(j)
            record["gender"] = random.choice(genders)
            record["region"] = random.choice(regions)
            record["lasttweeted"] = 0
            record["tweetcount"] = 0
            record["interests"] = randomInterests[:random.randint(1,9)]
            self.client.put(key,record,meta,wr_policy)
            print("Wrote user record for " , username , "\n")
            j += 1
        # Write user record
        print("\nDone creating " , totalUsers , "!\n")
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id: __init__.py 2088 2008-05-29 12:44:43Z Alex.Holkner $
import ctypes
import heapq
import sys
import threading
import time
import Queue
import pyglet
_debug = pyglet.options['debug_media']
import mt_media
import lib_openal as al
import lib_alc as alc
class OpenALException(mt_media.MediaException):
    """Media error specific to the OpenAL driver (e.g. an audio format
    with no matching AL buffer format)."""
    pass
# TODO move functions into context/driver?
def _split_nul_strings(s):
# NUL-separated list of strings, double-NUL-terminated.
nul = False
i = 0
while True:
if s[i] == '\0':
if nul:
break
else:
nul = True
else:
nul = False
i += 1
s = s[:i - 1]
return s.split('\0')
def get_extensions():
    # Return the list of ALC extension names for the current device.
    # NOTE(review): relies on the module-global `context` being
    # initialized elsewhere in this driver -- confirm call ordering.
    extensions = alc.alcGetString(context._device, alc.ALC_EXTENSIONS)
    if sys.platform == 'darwin':
        # OS X returns a single space-separated C string.
        return ctypes.cast(extensions, ctypes.c_char_p).value.split(' ')
    else:
        # Elsewhere: NUL-separated, double-NUL-terminated list.
        return _split_nul_strings(extensions)
def have_extension(extension):
    # True if the named ALC extension is advertised by the device.
    return extension in get_extensions()
# Map (channel count, bits per sample) -> OpenAL buffer format enum.
# Only mono/stereo at 8 or 16 bits are supported.
format_map = {
    (1, 8): al.AL_FORMAT_MONO8,
    (1, 16): al.AL_FORMAT_MONO16,
    (2, 8): al.AL_FORMAT_STEREO8,
    (2, 16): al.AL_FORMAT_STEREO16,
}
class OpenALWorker(mt_media.MediaThread):
    """Background thread that keeps registered players' AL buffers full.

    Each loop iteration refills the single neediest player, then naps.
    `self.condition` / `self.stopped` / `self.sleep` come from the
    mt_media.MediaThread base class (defined elsewhere).
    """
    # Minimum size to bother refilling (bytes)
    _min_write_size = 512
    # Time to wait if there are players, but they're all full.
    _nap_time = 0.05
    # Time to wait if there are no players.
    _sleep_time = None
    def __init__(self):
        super(OpenALWorker, self).__init__()
        # Players currently managed by this worker; guarded by
        # self.condition.
        self.players = set()
    def run(self):
        """Worker main loop; exits when the thread is stopped."""
        while True:
            # This is a big lock, but ensures a player is not deleted while
            # we're processing it -- this saves on extra checks in the
            # player's methods that would otherwise have to check that it's
            # still alive.
            self.condition.acquire()
            if self.stopped:
                self.condition.release()
                break
            sleep_time = -1
            # Refill the player with the largest pending write_size
            # (i.e. the one closest to running dry).
            if self.players:
                player = None
                write_size = 0
                for p in self.players:
                    s = p.get_write_size()
                    if s > write_size:
                        player = p
                        write_size = s
                if write_size > self._min_write_size:
                    player.refill(write_size)
                else:
                    # Everyone is (nearly) full; nap briefly.
                    sleep_time = self._nap_time
            else:
                # No players at all; sleep until notified.
                sleep_time = self._sleep_time
            self.condition.release()
            if sleep_time != -1:
                self.sleep(sleep_time)
    def add(self, player):
        # Register a player and wake the worker so it gets serviced.
        self.condition.acquire()
        self.players.add(player)
        self.condition.notify()
        self.condition.release()
    def remove(self, player):
        # Unregister a player and wake the worker.
        self.condition.acquire()
        self.players.remove(player)
        self.condition.notify()
        self.condition.release()
class OpenALAudioPlayer(mt_media.AbstractAudioPlayer):
#: Minimum size of an OpenAL buffer worth bothering with, in bytes
_min_buffer_size = 512
#: Aggregate (desired) buffer size, in bytes
_ideal_buffer_size = 44800
    def __init__(self, source_group, player):
        """Create an AL source for the group's audio format and prime it.

        Raises OpenALException if the format has no AL equivalent.
        NOTE(review): `context` is a module-global initialized elsewhere
        in this driver.
        """
        super(OpenALAudioPlayer, self).__init__(source_group, player)
        audio_format = source_group.audio_format
        try:
            self._al_format = format_map[(audio_format.channels,
                                          audio_format.sample_size)]
        except KeyError:
            raise OpenALException('Unsupported audio format.')
        self._al_source = al.ALuint()
        al.alGenSources(1, self._al_source)
        # Lock policy: lock all instance vars (except constants). (AL calls
        # are locked on context).
        self._lock = threading.RLock()
        # Cursor positions, like DSound and Pulse drivers, refer to a
        # hypothetical infinite-length buffer. Cursor units are in bytes.
        # Cursor position of current (head) AL buffer
        self._buffer_cursor = 0
        # Estimated playback cursor position (last seen)
        self._play_cursor = 0
        # Cursor position of end of queued AL buffer.
        self._write_cursor = 0
        # List of currently queued buffer sizes (in bytes)
        self._buffer_sizes = []
        # List of currently queued buffer timestamps
        self._buffer_timestamps = []
        # Timestamp at end of last written buffer (timestamp to return in case
        # of underrun)
        self._underrun_timestamp = None
        # List of (cursor, MediaEvent)
        self._events = []
        # Desired play state (True even if stopped due to underrun)
        self._playing = False
        # Has source group EOS been seen (and hence, event added to queue)?
        self._eos = False
        # OpenAL 1.0 timestamp interpolation: system time of current buffer
        # playback (best guess)
        if not context.have_1_1:
            self._buffer_system_time = time.time()
        # Queue an initial batch of buffers.
        self.refill(self._ideal_buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
return
# XXX TODO crashes
context.lock()
al.alDeleteSources(1, self._al_source)
context.unlock()
self._al_source = None
def play(self):
if self._playing:
return
self._playing = True
self._al_play()
if not context.have_1_1:
self._buffer_system_time = time.time()
context.worker.add(self)
def _al_play(self):
context.lock()
state = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
al.alSourcePlay(self._al_source)
context.unlock()
def stop(self):
if not self._playing:
return
self._pause_timestamp = self.get_time()
context.lock()
al.alSourcePause(self._al_source)
context.unlock()
self._playing = False
context.worker.remove(self)
def clear(self):
self._lock.acquire()
context.lock()
al.alSourceStop(self._al_source)
self._playing = False
del self._events[:]
self._underrun_timestamp = None
self._buffer_timestamps = [None for _ in self._buffer_timestamps]
context.unlock()
self._lock.release()
def _update_play_cursor(self):
self._lock.acquire()
context.lock()
# Release spent buffers
processed = al.ALint()
al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
processed = processed.value
if processed:
buffers = (al.ALuint * processed)()
al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
al.alDeleteBuffers(len(buffers), buffers)
context.unlock()
if processed:
if len(self._buffer_timestamps) == processed:
# Underrun, take note of timestamp
self._underrun_timestamp = \
self._buffer_timestamps[-1] + \
self._buffer_sizes[-1] / \
float(self.source_group.audio_format.bytes_per_second)
self._buffer_cursor += sum(self._buffer_sizes[:processed])
del self._buffer_sizes[:processed]
del self._buffer_timestamps[:processed]
if not context.have_1_1:
self._buffer_system_time = time.time()
# Update play cursor using buffer cursor + estimate into current
# buffer
if context.have_1_1:
bytes = al.ALint()
context.lock()
al.alGetSourcei(self._al_source, al.AL_BYTE_OFFSET, bytes)
context.unlock()
if _debug:
print 'got bytes offset', bytes.value
self._play_cursor = self._buffer_cursor + bytes.value
else:
# Interpolate system time past buffer timestamp
self._play_cursor = \
self._buffer_cursor + int(
(time.time() - self._buffer_system_time) * \
self.source_group.audio_format.bytes_per_second)
# Process events
while self._events and self._events[0][0] < self._play_cursor:
_, event = self._events.pop(0)
event._sync_dispatch_to_player(self.player)
self._lock.release()
def get_write_size(self):
self._lock.acquire()
self._update_play_cursor()
write_size = self._ideal_buffer_size - \
(self._write_cursor - self._play_cursor)
if self._eos:
write_size = 0
self._lock.release()
return write_size
def refill(self, write_size):
if _debug:
print 'refill', write_size
self._lock.acquire()
while write_size > self._min_buffer_size:
audio_data = self.source_group.get_audio_data(write_size)
if not audio_data:
self._eos = True
self._events.append(
(self._write_cursor, mt_media.MediaEvent(0, 'on_eos')))
self._events.append(
(self._write_cursor,
mt_media.MediaEvent(0, 'on_source_group_eos')))
break
for event in audio_data.events:
cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((cursor, event))
buffer = al.ALuint()
context.lock()
al.alGenBuffers(1, buffer)
al.alBufferData(buffer,
self._al_format,
audio_data.data,
audio_data.length,
self.source_group.audio_format.sample_rate)
al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(buffer))
context.unlock()
self._write_cursor += audio_data.length
self._buffer_sizes.append(audio_data.length)
self._buffer_timestamps.append(audio_data.timestamp)
write_size -= audio_data.length
# Check for underrun stopping playback
if self._playing:
state = al.ALint()
context.lock()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
if _debug:
print 'underrun'
al.alSourcePlay(self._al_source)
context.unlock()
self._lock.release()
def get_time(self):
try:
buffer_timestamp = self._buffer_timestamps[0]
except IndexError:
return self._underrun_timestamp
if buffer_timestamp is None:
return None
return buffer_timestamp + \
(self._play_cursor - self._buffer_cursor) / \
float(self.source_group.audio_format.bytes_per_second)
def set_volume(self, volume):
context.lock()
al.alSourcef(self._al_source, al.AL_GAIN, max(0, volume))
context.unlock()
def set_position(self, position):
x, y, z = position
context.lock()
al.alSource3f(self._al_source, al.AL_POSITION, x, y, z)
context.unlock()
def set_min_distance(self, min_distance):
context.lock()
al.alSourcef(self._al_source, al.AL_REFERENCE_DISTANCE, min_distance)
context.unlock()
def set_max_distance(self, max_distance):
context.lock()
al.alSourcef(self._al_source, al.AL_MAX_DISTANCE, max_distance)
context.unlock()
def set_pitch(self, pitch):
context.lock()
al.alSourcef(self._al_source, al.AL_PITCH, max(0, pitch))
context.unlock()
def set_cone_orientation(self, cone_orientation):
x, y, z = cone_orientation
context.lock()
al.alSource3f(self._al_source, al.AL_DIRECTION, x, y, z)
context.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
context.lock()
al.alSourcef(self._al_source, al.AL_CONE_INNER_ANGLE, cone_inner_angle)
context.unlock()
def set_cone_outer_angle(self, cone_outer_angle):
context.lock()
al.alSourcef(self._al_source, al.AL_CONE_OUTER_ANGLE, cone_outer_angle)
context.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
context.lock()
al.alSourcef(self._al_source, al.AL_CONE_OUTER_GAIN, cone_outer_gain)
context.unlock()
class OpenALDriver(mt_media.AbstractAudioDriver):
    """Driver entry point: owns the ALC device/context, the worker thread,
    the AL-call lock and the listener state."""

    def __init__(self, device_name=None):
        """Open the (optionally named) device, make a context current and
        start the refill worker thread.

        Raises Exception if no OpenAL device can be opened.
        """
        super(OpenALDriver, self).__init__()

        # TODO devices must be enumerated on Windows, otherwise 1.0 context
        # is returned.

        self._device = alc.alcOpenDevice(device_name)
        if not self._device:
            raise Exception('No OpenAL device.')

        alcontext = alc.alcCreateContext(self._device, None)
        alc.alcMakeContextCurrent(alcontext)

        # NOTE(review): 'and False' force-disables the OpenAL 1.1 code paths,
        # so players always use the 1.0 system-time interpolation fallback.
        self.have_1_1 = self.have_version(1, 1) and False

        # Serialises all AL calls issued by players and the worker thread.
        self._lock = threading.Lock()

        # Start worker thread
        self.worker = OpenALWorker()
        self.worker.start()

    def create_audio_player(self, source_group, player):
        """Factory for per-source audio players."""
        return OpenALAudioPlayer(source_group, player)

    def delete(self):
        # Stop the worker; the ALC device/context are not torn down here.
        self.worker.stop()

    def lock(self):
        """Acquire the global AL-call lock."""
        self._lock.acquire()

    def unlock(self):
        """Release the global AL-call lock."""
        self._lock.release()

    def have_version(self, major, minor):
        """True if the device reports ALC version >= (major, minor)."""
        return (major, minor) <= self.get_version()

    def get_version(self):
        """Return the (major, minor) ALC version reported by the device."""
        major = alc.ALCint()
        minor = alc.ALCint()
        alc.alcGetIntegerv(self._device, alc.ALC_MAJOR_VERSION,
                           ctypes.sizeof(major), major)
        alc.alcGetIntegerv(self._device, alc.ALC_MINOR_VERSION,
                           ctypes.sizeof(minor), minor)
        return major.value, minor.value

    # Listener API

    def _set_volume(self, volume):
        self.lock()
        al.alListenerf(al.AL_GAIN, volume)
        self.unlock()
        self._volume = volume

    def _set_position(self, position):
        x, y, z = position
        self.lock()
        al.alListener3f(al.AL_POSITION, x, y, z)
        self.unlock()
        self._position = position

    def _set_forward_orientation(self, orientation):
        # AL_ORIENTATION wants forward and up packed as 6 floats.
        val = (al.ALfloat * 6)(*(orientation + self._up_orientation))
        self.lock()
        al.alListenerfv(al.AL_ORIENTATION, val)
        self.unlock()
        self._forward_orientation = orientation

    def _set_up_orientation(self, orientation):
        val = (al.ALfloat * 6)(*(self._forward_orientation + orientation))
        self.lock()
        al.alListenerfv(al.AL_ORIENTATION, val)
        self.unlock()
        self._up_orientation = orientation
# Module-level singleton driver; assigned by create_audio_driver().
context = None


def create_audio_driver(device_name=None):
    """Create, install and return the global OpenALDriver instance.

    :param device_name: optional ALC device name; None opens the default.
    """
    global context
    context = OpenALDriver(device_name)
    if _debug:
        print 'OpenAL', context.get_version()
    return context
| |
from __future__ import unicode_literals
import os
import appdirs
from reviewbot.config import config
from reviewbot.utils.api import get_api_root
from reviewbot.utils.filesystem import make_tempdir
from reviewbot.utils.log import get_logger
from reviewbot.utils.process import execute
logger = get_logger(__name__)
repositories = {}
class Repository(object):
    """Base class for repositories Review Bot can sync and check out."""

    def sync(self):
        """Bring the local copy up to date (no-op in the base class)."""
        pass
class GitRepository(Repository):
    """A git repository."""

    def __init__(self, name, clone_path):
        """Initialize the repository.

        Args:
            name (unicode):
                The configured name of the repository.

            clone_path (unicode):
                The path of the git remote to clone.
        """
        self.name = name
        self.clone_path = clone_path

        # Bare clones live under the site data dir, keyed by name.
        self.repo_path = os.path.join(appdirs.site_data_dir('reviewbot'),
                                      'repositories', name)

    def sync(self):
        """Sync the latest state of the repository."""
        if os.path.exists(self.repo_path):
            logger.info('Fetching into existing repository %s',
                        self.repo_path)
            execute(['git', '--git-dir=%s' % self.repo_path, 'fetch',
                     'origin', '+refs/heads/*:refs/heads/*', '--prune'])
        else:
            os.makedirs(self.repo_path)
            logger.info('Cloning repository %s to %s',
                        self.clone_path, self.repo_path)
            execute(['git', 'clone', '--bare', self.clone_path,
                     self.repo_path])

    def checkout(self, commit_id):
        """Check out the given commit.

        Args:
            commit_id (unicode):
                The ID of the commit to check out.

        Returns:
            unicode:
            The name of a directory with the given checkout.
        """
        workdir = make_tempdir()
        branchname = 'br-%s' % commit_id
        git_dir_arg = '--git-dir=%s' % self.repo_path

        # A shallow local clone of a temporary branch gives us a working
        # tree for just this commit; the branch is removed afterwards.
        logger.info('Creating temporary branch for clone in repo %s',
                    self.repo_path)
        execute(['git', git_dir_arg, 'branch', branchname, commit_id])

        logger.info('Creating working tree for commit ID %s in %s',
                    commit_id, workdir)
        execute(['git', 'clone', '--local', '--no-hardlinks', '--depth', '1',
                 '--branch', branchname, self.repo_path, workdir])

        logger.info('Removing temporary branch for clone in repo %s',
                    self.repo_path)
        execute(['git', git_dir_arg, 'branch', '-d', branchname])

        return workdir
class HgRepository(Repository):
    """A hg repository."""

    def __init__(self, name, clone_path):
        """Initialize the repository.

        Args:
            name (unicode):
                The configured name of the repository.

            clone_path (unicode):
                The path of the hg repository to clone.
        """
        self.name = name
        self.clone_path = clone_path

        # Local clones live under the site data dir, keyed by name.
        self.repo_path = os.path.join(appdirs.site_data_dir('reviewbot'),
                                      'repositories', name)

    def sync(self):
        """Sync the latest state of the repository."""
        if os.path.exists(self.repo_path):
            logger.info('Pulling into existing repository %s',
                        self.repo_path)
            execute(['hg', '-R', self.repo_path, 'pull'])
        else:
            os.makedirs(self.repo_path)
            logger.info('Cloning repository %s to %s',
                        self.clone_path, self.repo_path)
            execute(['hg', 'clone', '-U', self.clone_path,
                     self.repo_path])

    def checkout(self, commit_id):
        """Check out the given commit.

        Args:
            commit_id (unicode):
                The ID of the commit to check out.

        Returns:
            unicode:
            The name of a directory with the given checkout.
        """
        workdir = make_tempdir()

        # 'hg archive' extracts the revision's files straight into workdir.
        logger.info('Creating working tree for commit ID %s in %s',
                    commit_id, workdir)
        execute(['hg', '-R', self.repo_path, 'archive', '-r', commit_id,
                 '-t', 'files', workdir])

        return workdir
def fetch_repositories(url, user=None, token=None):
    """Fetch repositories from Review Board.

    Args:
        url (unicode):
            The configured url for the connection.

        user (unicode):
            The configured user for the connection.

        token (unicode):
            The configured API token for the user.
    """
    logger.info('Fetching repositories from Review Board: %s', url)
    root = get_api_root(url=url,
                        username=user,
                        api_token=token)

    for tool_type in ('Mercurial', 'Git'):
        repos = root.get_repositories(tool=tool_type, only_links='',
                                      only_fields='path,mirror_path,name')

        for repo in repos.all_items:
            # Prefer the primary path; fall back to the mirror.  A path is
            # usable if it exists locally or looks like a remote URL.
            repo_source = next(
                (path
                 for path in (repo.path, repo.mirror_path)
                 if (os.path.exists(path) or
                     path.startswith('http') or
                     path.startswith('git'))),
                None)

            if repo_source:
                init_repository(repo.name, tool_type.lower(), repo_source)
            else:
                logger.warning('Cannot find usable path for repository: %s',
                               repo.name)
def init_repository(repo_name, repo_type, repo_source):
    """Add repository entry to global list.

    Args:
        repo_name (unicode):
            The name of the repository.

        repo_type (unicode):
            The type of the repository.

        repo_source (unicode):
            The source of the repository.
    """
    # Dispatch on type; mutating the module-level dict needs no 'global'.
    repo_classes = {
        'git': GitRepository,
        'hg': HgRepository,
        'mercurial': HgRepository,
    }
    repo_cls = repo_classes.get(repo_type)

    if repo_cls is None:
        logger.error('Unknown type "%s" for configured repository %s',
                     repo_type, repo_name)
    else:
        repositories[repo_name] = repo_cls(repo_name, repo_source)
def init_repositories():
    """Set up configured repositories."""
    # Repositories advertised by each configured Review Board server...
    for server in config['reviewboard_servers']:
        fetch_repositories(server['url'],
                           server.get('user'),
                           server.get('token'))

    # ...plus any repositories configured directly.
    for repository in config['repositories']:
        init_repository(repository['name'],
                        repository.get('type'),
                        repository['clone_path'])
| |
import fnmatch
import six
import yaml
from conans.errors import ConanException
from conans.util.sha import sha1
_falsey_options = ["false", "none", "0", "off", ""]
def option_wrong_value_msg(name, value, value_range):
    """ The provided value is not among the range of values that it should
    be
    """
    template = "'%s' is not a valid 'options.%s' value.\nPossible values are %s"
    return template % (value, name, value_range)
def option_not_exist_msg(option_name, existing_options):
    """ Someone is referencing an option that is not available in the current
    package options

    :param option_name: name of the missing option
    :param existing_options: iterable of valid option names (may be empty)
    """
    result = ["'options.%s' doesn't exist" % option_name]
    # Parenthesise (existing_options or "none"): '%' binds tighter than 'or',
    # so without the parentheses the expression was
    # ("..." % existing_options) or "none" and the "none" fallback for an
    # empty option list could never be selected.
    result.append("Possible options are %s" % (existing_options or "none"))
    return "\n".join(result)
def option_undefined_msg(name):
    """Error text for an option that was never assigned a value."""
    return "'%s' value not defined" % name
class PackageOptionValue(str):
    """ thin wrapper around a string value that allows to check for several false string
    and also promote other types to string for homogeneous comparison
    """

    def __bool__(self):
        # "false", "none", "0", "off" and "" all count as False
        return self.lower() not in _falsey_options

    def __nonzero__(self):
        # Python 2 truthiness protocol delegates to __bool__
        return self.__bool__()

    def __eq__(self, other):
        # Promote 'other' to str so ints/bools compare homogeneously
        return str(other).__eq__(self)

    def __ne__(self, other):
        return not self.__eq__(other)

    # Defining __eq__ sets __hash__ to None on Python 3, which would make
    # these values unhashable (TypeError when used as dict keys or in sets).
    # Restore str's hashing, which is consistent with the string-based
    # __eq__ above.
    __hash__ = str.__hash__
class PackageOptionValues(object):
    """ set of key(string)-value(PackageOptionValue) for options of a package.
    Not prefixed by package name:
        static: True
        optimized: 2
    These are non-validating, not constrained.
    Used for UserOptions, which is a dict{package_name: PackageOptionValues}

    Attribute access is intercepted: reading an unknown option yields None,
    and assigning any non-underscore attribute stores an option value.
    """
    def __init__(self):
        self._dict = {}  # {option_name: PackageOptionValue}
        self._modified = {}  # {option_name: (value, modifying_ref)}

    def __bool__(self):
        # Truthy iff at least one option value is stored.
        return bool(self._dict)

    def __nonzero__(self):
        # Python 2 truthiness protocol
        return self.__bool__()

    def __getattr__(self, attr):
        # Unknown options read as None instead of raising AttributeError.
        # NOTE: this also makes self._freeze read as None (falsey) until
        # freeze() has been called -- propagate_upstream() relies on that.
        if attr not in self._dict:
            return None
        return self._dict[attr]

    def __delattr__(self, attr):
        # Deleting an unknown option is a silent no-op.
        if attr not in self._dict:
            return
        del self._dict[attr]

    def clear(self):
        """Drop all stored option values."""
        self._dict.clear()

    def __setattr__(self, attr, value):
        # Underscore-prefixed names are real instance attributes; anything
        # else is stored as an option value (promoted to string).
        if attr[0] == "_":
            return super(PackageOptionValues, self).__setattr__(attr, value)
        self._dict[attr] = PackageOptionValue(value)

    def copy(self):
        """Shallow copy of the option values (modification log not copied)."""
        result = PackageOptionValues()
        for k, v in self._dict.items():
            result._dict[k] = v
        return result

    @property
    def fields(self):
        """Sorted list of option names."""
        return sorted(list(self._dict.keys()))

    def keys(self):
        return self._dict.keys()

    def items(self):
        """Sorted (name, value) pairs."""
        return sorted(list(self._dict.items()))

    def add(self, option_text):
        """Parse and store a single "name=value" string."""
        assert isinstance(option_text, six.string_types)
        name, value = option_text.split("=")
        self._dict[name.strip()] = PackageOptionValue(value.strip())

    def add_option(self, option_name, option_value):
        """Store a single option, promoting the value to string."""
        self._dict[option_name] = PackageOptionValue(option_value)

    def update(self, other):
        """Merge values from another PackageOptionValues (other wins)."""
        assert isinstance(other, PackageOptionValues)
        self._dict.update(other._dict)

    def remove(self, option_name):
        # Raises KeyError if the option is not present.
        del self._dict[option_name]

    def freeze(self):
        """Forbid further changes via propagate_upstream()."""
        self._freeze = True

    def propagate_upstream(self, down_package_values, down_ref, own_ref, package_name):
        """Apply downstream-imposed values, raising on conflicts.

        Equal re-assignments are ignored; after freeze() any change raises;
        a second, different assignment by another ref raises.
        """
        if not down_package_values:
            return

        assert isinstance(down_package_values, PackageOptionValues)
        for (name, value) in down_package_values.items():
            if name in self._dict and self._dict.get(name) == value:
                # Same value already set: nothing to do.
                continue

            if self._freeze:
                raise ConanException("%s tried to change %s option %s to %s\n"
                                     "but it was already defined as %s"
                                     % (down_ref, own_ref, name, value, self._dict.get(name)))

            modified = self._modified.get(name)
            if modified is not None:
                modified_value, modified_ref = modified
                raise ConanException("%s tried to change %s option %s:%s to %s\n"
                                     "but it was already assigned to %s by %s"
                                     % (down_ref, own_ref, package_name, name, value,
                                        modified_value, modified_ref))
            else:
                # Record who set the value so later conflicts can be reported.
                self._modified[name] = (value, down_ref)
                self._dict[name] = value

    def serialize(self):
        """Plain (name, value) pairs for serialization."""
        return self.items()

    @property
    def sha(self):
        """SHA1 over "name=value" lines of all truthy options."""
        result = []
        for name, value in self.items():
            # It is important to discard None values, so migrations in settings can be done
            # without breaking all existing packages SHAs, by adding a first "None" option
            # that doesn't change the final sha
            if value:
                result.append("%s=%s" % (name, value))
        return sha1('\n'.join(result).encode())
class OptionsValues(object):
    """ static= True,
    Boost.static = False,
    Poco.optimized = True

    Holds this package's own values (_package_values) plus per-requirement
    values (_reqs_options).  Attribute access on unknown names is forwarded
    to _package_values.
    """
    def __init__(self, values=None):
        self._package_values = PackageOptionValues()
        self._reqs_options = {}  # {name("Boost": PackageOptionValues}
        if not values:
            return

        # convert tuple "Pkg:option=value", "..." to list of tuples(name, value)
        if isinstance(values, tuple):
            values = [item.split("=", 1) for item in values]

        # convert dict {"Pkg:option": "value", "..": "..", ...} to list of tuples (name, value)
        if isinstance(values, dict):
            values = [(k, v) for k, v in values.items()]

        # handle list of tuples (name, value)
        for (k, v) in values:
            k = k.strip()
            v = v.strip() if isinstance(v, six.string_types) else v
            tokens = k.split(":")
            if len(tokens) == 2:
                # "Pkg:option" is scoped to that requirement's values
                package, option = tokens
                package_values = self._reqs_options.setdefault(package.strip(),
                                                               PackageOptionValues())
                package_values.add_option(option, v)
            else:
                # Unscoped name belongs to this package's own values
                self._package_values.add_option(k, v)

    def update(self, other):
        """Merge another OptionsValues into this one (other wins)."""
        self._package_values.update(other._package_values)
        for package_name, package_values in other._reqs_options.items():
            pkg_values = self._reqs_options.setdefault(package_name, PackageOptionValues())
            pkg_values.update(package_values)

    def scope_options(self, name):
        """Move the unscoped values under the package called *name*."""
        if self._package_values:
            self._reqs_options.setdefault(name, PackageOptionValues()).update(self._package_values)
            self._package_values = PackageOptionValues()

    def descope_options(self, name):
        """Inverse of scope_options: pull *name*'s values back to unscoped."""
        package_values = self._reqs_options.pop(name, None)
        if package_values:
            self._package_values.update(package_values)

    def clear_unscoped_options(self):
        self._package_values.clear()

    def __getitem__(self, item):
        # NOTE: creates an empty entry for unknown packages (setdefault).
        return self._reqs_options.setdefault(item, PackageOptionValues())

    def __setitem__(self, item, value):
        self._reqs_options[item] = value

    def pop(self, item):
        # Returns None (not KeyError) for unknown packages.
        return self._reqs_options.pop(item, None)

    def remove(self, name, package=None):
        """Remove option *name*, from *package*'s scope if given."""
        if package:
            self._reqs_options[package].remove(name)
        else:
            self._package_values.remove(name)

    def __repr__(self):
        return self.dumps()

    def __getattr__(self, attr):
        # Unknown attributes are forwarded to the unscoped values.
        return getattr(self._package_values, attr)

    def copy(self):
        """Deep-ish copy: per-package value sets are copied too."""
        result = OptionsValues()
        result._package_values = self._package_values.copy()
        for k, v in self._reqs_options.items():
            result._reqs_options[k] = v.copy()
        return result

    def __setattr__(self, attr, value):
        # Underscore names are real attributes; anything else is an
        # unscoped option assignment.
        if attr[0] == "_":
            return super(OptionsValues, self).__setattr__(attr, value)
        return setattr(self._package_values, attr, value)

    def __delattr__(self, attr):
        delattr(self._package_values, attr)

    def clear_indirect(self):
        """Empty every requirement's values but keep the package keys."""
        for v in self._reqs_options.values():
            v.clear()

    def filter_used(self, used_pkg_names):
        """Drop per-package values for packages not in *used_pkg_names*."""
        self._reqs_options = {k: v for k, v in self._reqs_options.items() if k in used_pkg_names}

    def as_list(self):
        """Flat list of (name, value), requirement options as "Pkg:opt"."""
        result = []
        options_list = self._package_values.items()
        if options_list:
            result.extend(options_list)
        for package_name, package_values in sorted(self._reqs_options.items()):
            for option_name, option_value in package_values.items():
                result.append(("%s:%s" % (package_name, option_name), option_value))
        return result

    def dumps(self):
        """Multiline "name=value" text, inverse of loads()."""
        result = []
        for key, value in self.as_list():
            result.append("%s=%s" % (key, value))
        return "\n".join(result)

    @staticmethod
    def loads(text):
        """ parses a multiline text in the form
        Package:option=value
        other_option=3
        OtherPack:opt3=12.1
        """
        options = tuple(line.strip() for line in text.splitlines() if line.strip())
        return OptionsValues(options)

    @property
    def sha(self):
        """SHA1 combining the unscoped sha and each requirement's sha."""
        result = []
        result.append(self._package_values.sha)
        for key in sorted(list(self._reqs_options.keys())):
            result.append(self._reqs_options[key].sha)
        return sha1('\n'.join(result).encode())

    def serialize(self):
        """Dict form: {"options": [...], "req_options": {pkg: [...]}}."""
        ret = {}
        ret["options"] = self._package_values.serialize()
        ret["req_options"] = {}
        for name, values in self._reqs_options.items():
            ret["req_options"][name] = values.serialize()
        return ret

    def clear(self):
        self._package_values.clear()
        self._reqs_options.clear()
class PackageOption(object):
    """A single package option: its name, current value, and the range of
    admissible values (the string "ANY" meaning unconstrained)."""

    def __init__(self, possible_values, name):
        self._name = name
        self._value = None
        # Non-ANY ranges are normalised to sorted strings.
        self._possible_values = ("ANY" if possible_values == "ANY"
                                 else sorted(str(v) for v in possible_values))

    def __bool__(self):
        """Truthiness of the current value; "false"/"0"/"off"/... are falsey."""
        return bool(self._value) and self._value.lower() not in _falsey_options

    def __nonzero__(self):
        # Python 2 alias for __bool__
        return self.__bool__()

    def __str__(self):
        return str(self._value)

    def __int__(self):
        return int(self._value)

    def _check_option_value(self, value):
        """ checks that the provided value is allowed by current restrictions
        """
        if self._possible_values == "ANY":
            return
        if value not in self._possible_values:
            raise ConanException(option_wrong_value_msg(self._name, value,
                                                        self._possible_values))

    def __eq__(self, other):
        # Comparing against None asks "is this option unset?"
        if other is None:
            return self._value is None

        candidate = str(other)
        self._check_option_value(candidate)
        return candidate == str(self)

    def __ne__(self, other):
        return not self.__eq__(other)

    def remove(self, values):
        """Restrict the range by removing *values* (scalar or collection)."""
        if self._possible_values == "ANY":
            return

        if not isinstance(values, (list, tuple, set)):
            values = [values]
        banned = [str(v) for v in values]
        self._possible_values = [v for v in self._possible_values
                                 if v not in banned]

        # The current value must still be admissible after the restriction.
        if self._value is not None:
            self._check_option_value(self._value)

    @property
    def value(self):
        """Current value as a string, or None if unset."""
        return self._value

    @value.setter
    def value(self, v):
        new_value = str(v)
        self._check_option_value(new_value)
        self._value = new_value

    def validate(self):
        """Raise ConanException if still unset and None is not admissible.

        NOTE(review): with an "ANY" range this is a substring test
        ("None" in "ANY" is False), so ANY options also require a value --
        confirm that is intentional.
        """
        if self._value is None and "None" not in self._possible_values:
            raise ConanException(option_undefined_msg(self._name))
class PackageOptions(object):
    """Declared options of a single recipe: {name: PackageOption}.

    Attribute access (self.shared, self.shared = "True", del self.shared)
    is routed to the underlying PackageOption entries and raises for
    undeclared names.
    """
    def __init__(self, definition):
        definition = definition or {}
        # {option_name: PackageOption(range, name)}
        self._data = {str(k): PackageOption(v, str(k))
                      for k, v in definition.items()}
        self._modified = {}  # {name: (value, modifying_ref)}
        self._freeze = False

    def __contains__(self, option):
        return str(option) in self._data

    @staticmethod
    def loads(text):
        """Build from a YAML definition text."""
        return PackageOptions(yaml.safe_load(text) or {})

    def get_safe(self, field):
        # Like attribute access but returns None for undeclared names
        # instead of raising.
        return self._data.get(field)

    def validate(self):
        """Raise ConanException for any option left undefined."""
        for child in self._data.values():
            child.validate()

    @property
    def fields(self):
        """Sorted list of declared option names."""
        return sorted(list(self._data.keys()))

    def remove(self, item):
        """Undeclare one or several options (silently ignores unknowns)."""
        if not isinstance(item, (list, tuple, set)):
            item = [item]
        for it in item:
            it = str(it)
            self._data.pop(it, None)

    def clear(self):
        self._data = {}

    def _ensure_exists(self, field):
        # Guard used by the attribute protocol below.
        if field not in self._data:
            raise ConanException(option_not_exist_msg(field, list(self._data.keys())))

    def __getattr__(self, field):
        assert field[0] != "_", "ERROR %s" % field
        self._ensure_exists(field)
        return self._data[field]

    def __delattr__(self, field):
        assert field[0] != "_", "ERROR %s" % field
        self._ensure_exists(field)
        del self._data[field]

    def __setattr__(self, field, value):
        # Private names and the 'values' property bypass option handling;
        # anything else must be a declared option.
        if field[0] == "_" or field.startswith("values"):
            return super(PackageOptions, self).__setattr__(field, value)
        self._ensure_exists(field)
        self._data[field].value = value

    @property
    def values(self):
        """Snapshot of current values as a PackageOptionValues."""
        result = PackageOptionValues()
        for field, package_option in self._data.items():
            result.add_option(field, package_option.value)
        return result

    def _items(self):
        # Sorted (name, value) pairs.
        result = []
        for field, package_option in sorted(list(self._data.items())):
            result.append((field, package_option.value))
        return result

    def items(self):
        return self._items()

    def iteritems(self):
        # Python 2 style alias
        return self._items()

    @values.setter
    def values(self, vals):
        """Assign values in bulk; every name must be declared."""
        assert isinstance(vals, PackageOptionValues)
        for (name, value) in vals.items():
            self._ensure_exists(name)
            self._data[name].value = value

    def initialize_patterns(self, values):
        # Need to apply only those that exists
        for option, value in values.items():
            if option in self._data:
                self._data[option].value = value

    def freeze(self):
        """Forbid further changes via propagate_upstream()."""
        self._freeze = True

    def propagate_upstream(self, package_values, down_ref, own_ref, pattern_options):
        """
        :param: package_values: PackageOptionValues({"shared": "True"}
        :param: pattern_options: Keys from the "package_values" e.g. ["shared"] that shouldn't raise
        if they are not existing options for the current object
        """
        if not package_values:
            return

        for (name, value) in package_values.items():
            if name in self._data and self._data.get(name) == value:
                # Same value already set: nothing to do.
                continue

            if self._freeze:
                raise ConanException("%s tried to change %s option %s to %s\n"
                                     "but it was already defined as %s"
                                     % (down_ref, own_ref, name, value, self._data.get(name)))

            modified = self._modified.get(name)
            if modified is not None:
                modified_value, modified_ref = modified
                raise ConanException("%s tried to change %s option %s to %s\n"
                                     "but it was already assigned to %s by %s"
                                     % (down_ref, own_ref, name, value,
                                        modified_value, modified_ref))
            else:
                if name in pattern_options:  # If it is a pattern-matched option, should check field
                    if name in self._data:
                        self._data[name].value = value
                        self._modified[name] = (value, down_ref)
                else:
                    # Exact-match options must be declared.
                    self._ensure_exists(name)
                    self._data[name].value = value
                    self._modified[name] = (value, down_ref)
class Options(object):
    """ All options of a package, both its own options and the upstream ones.
    Owned by ConanFile.

    Unknown attribute reads/writes are forwarded to the recipe's own
    PackageOptions; per-requirement values live in _deps_package_values.
    """
    def __init__(self, options):
        assert isinstance(options, PackageOptions)
        self._package_options = options
        # Addressed only by name, as only 1 configuration is allowed
        # if more than 1 is present, 1 should be "private" requirement and its options
        # are not public, not overridable
        self._deps_package_values = {}  # {name("Boost": PackageOptionValues}

    def freeze(self):
        """Freeze own options and every requirement's values."""
        self._package_options.freeze()
        for v in self._deps_package_values.values():
            v.freeze()

    @property
    def deps_package_values(self):
        return self._deps_package_values

    def clear(self):
        # Clears only the package's own options, not the deps' values.
        self._package_options.clear()

    def __contains__(self, option):
        return option in self._package_options

    def __getitem__(self, item):
        # NOTE: creates an empty entry for unknown packages (setdefault).
        return self._deps_package_values.setdefault(item, PackageOptionValues())

    def __getattr__(self, attr):
        # Forward unknown reads to the recipe's own options.
        return getattr(self._package_options, attr)

    def __setattr__(self, attr, value):
        # Private names and 'values' are real attributes; anything else is
        # an assignment to one of the recipe's own options.
        if attr[0] == "_" or attr == "values":
            return super(Options, self).__setattr__(attr, value)
        return setattr(self._package_options, attr, value)

    def __delattr__(self, field):
        # Deleting an undeclared option is a silent no-op.
        try:
            self._package_options.__delattr__(field)
        except ConanException:
            pass

    @property
    def values(self):
        """Snapshot as OptionsValues (own + per-requirement values)."""
        result = OptionsValues()
        result._package_values = self._package_options.values
        for k, v in self._deps_package_values.items():
            result._reqs_options[k] = v.copy()
        return result

    @values.setter
    def values(self, v):
        """Replace both own values and the per-requirement values."""
        assert isinstance(v, OptionsValues)
        self._package_options.values = v._package_values
        self._deps_package_values.clear()
        for k, v in v._reqs_options.items():
            self._deps_package_values[k] = v.copy()

    def propagate_upstream(self, down_package_values, down_ref, own_ref):
        """ used to propagate from downstream the options to the upper requirements
        :param: down_package_values => {"*": PackageOptionValues({"shared": "True"})}
        :param: down_ref
        :param: own_ref: Reference of the current package => ConanFileReference
        """
        if not down_package_values:
            return

        assert isinstance(down_package_values, dict)
        option_values = PackageOptionValues()
        # First step is to accumulate all matching patterns, in sorted()=alphabetical order
        # except the exact match
        for package_pattern, package_option_values in sorted(down_package_values.items()):
            if own_ref.name != package_pattern and fnmatch.fnmatch(own_ref.name, package_pattern):
                option_values.update(package_option_values)
        # These are pattern options, shouldn't raise if not existing
        pattern_options = list(option_values.keys())
        # Now, update with the exact match, that has higher priority
        down_options = down_package_values.get(own_ref.name)
        if down_options is not None:
            option_values.update(down_options)

        self._package_options.propagate_upstream(option_values, down_ref, own_ref,
                                                 pattern_options=pattern_options)

        # Upstream propagation to deps
        for name, option_values in sorted(list(down_package_values.items())):
            if name != own_ref.name:
                pkg_values = self._deps_package_values.setdefault(name, PackageOptionValues())
                pkg_values.propagate_upstream(option_values, down_ref, own_ref, name)

    def initialize_upstream(self, user_values, name=None):
        """ used to propagate from downstream the options to the upper requirements
        """
        if user_values is not None:
            assert isinstance(user_values, OptionsValues)
            # This code is necessary to process patterns like *:shared=True
            # To apply to the current consumer, which might not have name
            for pattern, pkg_options in sorted(user_values._reqs_options.items()):
                if fnmatch.fnmatch(name or "", pattern):
                    self._package_options.initialize_patterns(pkg_options)
            # Then, the normal assignment of values, which could override patterns
            self._package_options.values = user_values._package_values
            for package_name, package_values in user_values._reqs_options.items():
                pkg_values = self._deps_package_values.setdefault(package_name,
                                                                  PackageOptionValues())
                pkg_values.update(package_values)

    def validate(self):
        """Raise ConanException for any of the recipe's undefined options."""
        return self._package_options.validate()

    def propagate_downstream(self, ref, options):
        """Record *ref*'s resolved values plus its requirements' values."""
        assert isinstance(options, OptionsValues)
        self._deps_package_values[ref.name] = options._package_values
        for k, v in options._reqs_options.items():
            self._deps_package_values[k] = v.copy()

    def clear_unused(self, references):
        """ remove all options not related to the passed references,
        that should be the upstream requirements
        """
        existing_names = [r.ref.name for r in references]
        self._deps_package_values = {k: v for k, v in self._deps_package_values.items()
                                     if k in existing_names}
| |
import pygrtest_common
import pygr.Data
import random
import unittest
from nosebase import *
from pygr import sequence
class Conserve_Suite(unittest.TestCase):
    """Megatest: sample ASAP2 hg17 exon annotations and query their splice
    sites and bodies against the UCSC 17-way alignment (requires pygr.Data
    resources; Python 2)."""

    def exonquery_megatest(self):
        """Query a random ~0.1% sample of exons against the MSA."""
        def printConservation(id,label,site):
            # Print one tab-separated line per alignment edge with identity
            # and coverage percentages.  NOTE(review): defined but unused --
            # getConservation below is the silent variant actually called.
            if msa.seqs.IDdict: # skip if alignment is empty
                for src,dest,edge in msa[site].edges(mergeMost=True):
                    print '%d\t%s\t%s\t%s\t%s\t%s\t%2.1f\t%2.1f' \
                          %(id,label,repr(src),src,idDict[dest],dest,
                            100*edge.pIdentity(),100*edge.pAligned())

        def getConservation(id,label,site):
            # Same traversal as printConservation but only formats the line;
            # exercises the alignment code path without spamming stdout.
            if msa.seqs.IDdict: # skip if alignment is empty
                for src,dest,edge in msa[site].edges(mergeMost=True):
                    a = '%d\t%s\t%s\t%s\t%s\t%s\t%2.1f\t%2.1f' \
                        %(id,label,repr(src),src,idDict[dest],dest,
                          100*edge.pIdentity(),100*edge.pAligned())

        exons = pygr.Data.getResource('Bio.Annotation.ASAP2.HUMAN.hg17.exons')
        msa = pygr.Data.getResource('Bio.MSA.UCSC.hg17_multiz17way')
        idDict = ~(msa.seqDict) # INVERSE: MAPS SEQ --> STRING IDENTIFIER
        l = exons.keys()
        coverage = 0.001 # 0.1% coverage -> ~90 minutes wall-clock time
        for i in range(int(len(l) * coverage)):
            # Random sampling with replacement over the exon IDs.
            k = random.randint(0,len(l) - 1)
            id = l[k]
            exon = exons[id].sequence
            ss1=exon.before()[-2:] # GET THE 2 NT SPLICE SITES
            ss2=exon.after()[:2]
            cacheHint=msa[ss1+ss2] #CACHE THE COVERING INTERVALS FROM ss1 TO ss2
            try:
                getConservation(id,'ss1',ss1)
                getConservation(id,'ss2',ss2)
                getConservation(id,'exon',exon)
            except TypeError:
                # Report the offending exon rather than aborting the sweep.
                print id, exon
class Blast_Suite(unittest.TestCase):
    """BLAST/megablast megatests over pygr.Data genome resources
    (Python 2: iteritems()/next())."""

    def setUp(self):
        # Genome resource suffixes, e.g. 'HUMAN.hg17', discovered from the
        # pygr.Data resource directory.
        self.genomes = ['.'.join(x.split('.')[-2:]) for x in pygr.Data.dir('Bio.Seq.Genome')]
        # Genomes that also have plain exon annotations (no cDNA/Map sets).
        available_exons = [x for x in pygr.Data.dir('Bio.Annotation.ASAP2') if 'exons' in x and 'cDNA' not in x and 'Map' not in x]
        self.available_exons = [x.replace('Bio.Annotation.ASAP2.','').replace('.exons','') for x in available_exons]

    def genome_blast_megatest(self):
        """megablast one exon of each genome against its own genome and
        expect a perfect-identity self hit."""
        for genome in self.genomes:
            if genome in self.available_exons:
                #print genome
                g = pygr.Data.getResource('Bio.Seq.Genome.%s' % genome)
                exons = pygr.Data.getResource('Bio.Annotation.ASAP2.%s.exons' % genome)
                it = exons.iteritems()
                # NOTE(review): two next() calls -- the first annotation is
                # skipped; confirm this is intentional.
                id, exon = it.next()
                id, exon = it.next()
                del it
                exon2 = exon
                # Re-wrap as a plain Sequence so megablast treats it as a
                # fresh query rather than a genome slice.
                exon = sequence.Sequence(str(exon.sequence),'1')
                m = g.megablast(exon, maxseq=1, minIdentity=0.9)
                if m.seqs.IDdict: # skip if alignment is empty
                    tmp = m[exon].edges(mergeMost=True)
                    if tmp:
                        src, dest, edge = tmp[0]
                        #print repr(src), repr(dest), len(tmp)
                        self.assertEqual(edge.pIdentity(trapOverflow=False), 1.)
                    #else:
                        #print 'no destination matches of proper length'

    def all_v_all_blast_test(self):
        """All-vs-all BLAST of the sp_hbb1 database; compare the resulting
        edge lists against stored reference results."""
        from pygr import cnestedlist,seqdb
        from pygr import sequence
        stored = PygrDataTextFile('results/seqdb2.pickle','r')
        old_result = stored['sp_allvall']
        min_ID = 0.5
        msa=cnestedlist.NLMSA('all_vs_all',mode='w',bidirectional=False) # ON-DISK
        sp=seqdb.BlastDB('sp_hbb1') # OPEN SWISSPROT DATABASE
        for id,s in sp.iteritems(): # FOR EVERY SEQUENCE IN SWISSPROT
            sp.blast(s,msa,expmax=1e-10, verbose=False) # GET STRONG HOMOLOGS, SAVE ALIGNMENT IN msa
        msa.build(saveSeqDict=True) # DONE CONSTRUCTING THE ALIGNMENT, SO BUILD THE ALIGNMENT DB INDEXES
        db = msa.seqDict.dicts.keys()[0]
        result = {}
        for k in db.values():
            edges = msa[k].edges(minAlignSize=12,pIdentityMin=min_ID)
            for t in edges:
                assert len(t[0]) >= 12
            # Deduplicate (src, dest, identity) triples via dict keys,
            # then keep a sorted list per query sequence.
            tmpdict = dict(map(lambda x:(x, None), [(str(t[0]), str(t[1]), t[2].pIdentity(trapOverflow=False)) for t in edges]))
            result[repr(k)] = tmpdict.keys()
            result[repr(k)].sort()
        # Same query set, then per-query: same hits, near-equal identities.
        assert sorted(result.keys()) == sorted(old_result.keys())
        for k in result:
            l = result[k]
            l2 = old_result[k]
            assert len(l) == len(l2)
            for i in range(len(l)):
                src, dest, identity = l[i]
                old_src, old_dest, old_identity = l2[i]
                assert (src, dest) == (old_src, old_dest)
                assert identity - old_identity < .0001
                assert identity >= min_ID
def all_v_all_blast_save():
    """Run all-vs-all BLAST of the SwissProt hbb1 set against itself and
    pickle the de-duplicated (src, dest, identity) triples under the key
    'sp_allvall' as the baseline for Blast_Suite.all_v_all_blast_test.

    Returns the built NLMSA alignment object.
    """
    from pygr import cnestedlist,seqdb
    working = PygrDataTextFile('results/seqdb2.pickle','w')
    msa=cnestedlist.NLMSA('all_vs_all',mode='w',bidirectional=False) # ON-DISK
    sp=seqdb.BlastDB('sp_hbb1') # OPEN SWISSPROT DATABASE
    for id,s in sp.iteritems(): # FOR EVERY SEQUENCE IN SWISSPROT
        sp.blast(s,msa,expmax=1e-10, verbose=False) # GET STRONG HOMOLOGS, SAVE ALIGNMENT IN msa
    msa.build(saveSeqDict=True) # DONE CONSTRUCTING THE ALIGNMENT, SO BUILD THE ALIGNMENT DB INDEXES
    db = msa.seqDict.dicts.keys()[0]
    result = {}
    for k in db.values():
        edges = msa[k].edges(minAlignSize=12, pIdentityMin=0.5)
        for t in edges:
            assert len(t[0]) >= 12
        # De-duplicate (src, dest, identity) triples via dict keys:
        tmpdict = dict(map(lambda x:(x, None), [(str(t[0]), str(t[1]), t[2].pIdentity(trapOverflow=False)) for t in edges]))
        result[repr(k)] = tmpdict.keys()
        result[repr(k)].sort()
    working['sp_allvall'] = result
    working.save()
    return msa
class Blastx_Test(object):
    """Translated (blastx) search of a mouse DNA sequence against SwissProt
    hbb1 proteins, compared to a hard-coded expected edge list."""
    def blastx_test(self):
        from pygr import seqdb, blast
        dna = seqdb.SequenceFileDB('hbb1_mouse.fa')
        prot = seqdb.SequenceFileDB('sp_hbb1')
        blastmap = blast.BlastxMapping(prot)
        # Expected (len(src), len(dest), len(src.sequence), pIdentity) rows:
        correct = [(146, 146, 438, 0.979), (146, 146, 438, 0.911), (146, 146, 438, 0.747), (146, 146, 438, 0.664), (146, 146, 438, 0.623), (146, 146, 438, 0.596), (145, 145, 435, 0.510), (143, 143, 429, 0.531), (146, 146, 438, 0.473), (146, 146, 438, 0.473), (146, 146, 438, 0.486), (144, 144, 432, 0.451), (145, 145, 435, 0.455), (144, 144, 432, 0.451), (146, 146, 438, 0.466), (146, 146, 438, 0.459), (52, 52, 156, 0.442), (90, 90, 270, 0.322), (23, 23, 69, 0.435), (120, 120, 360, 0.283), (23, 23, 69, 0.435), (120, 120, 360, 0.258), (23, 23, 69, 0.435), (120, 120, 360, 0.275), (23, 23, 69, 0.435), (120, 120, 360, 0.267)]
        results = blastmap[dna['gi|171854975|dbj|AB364477.1|']]
        l = []
        for result in results:
            for src,dest,edge in result.edges():
                l.append((len(src),len(dest),len(src.sequence),
                          edge.pIdentity()))
        # approximate_cmp tolerates 0.001 drift in the identity column:
        assert approximate_cmp(l, correct, 0.001) == 0, 'blastx results mismatch'
        # Passing a protein to a BlastxMapping must be rejected:
        try:
            results = blastmap[prot['HBB1_MOUSE']]
            raise AssertionError('failed to trap blastp in BlastxMapping')
        except ValueError:
            pass
class Tblastn_Test(object):
    """tblastn checks (protein query vs. translated DNA) plus a parser
    regression test for a malformed tblastn report."""
    def tblastn_test(self):
        from pygr import seqdb, blast
        dna = seqdb.SequenceFileDB('hbb1_mouse.fa')
        prot = seqdb.SequenceFileDB('sp_hbb1')
        blastmap = blast.BlastMapping(dna)
        result = blastmap[prot['HBB1_XENLA']]
        # Python 2 iterator protocol: take the first alignment edge.
        src,dest,edge = iter(result.edges()).next()
        # Exact expected query/hit strings from the canned databases:
        assert str(src) == 'LTAHDRQLINSTWGKLCAKTIGQEALGRLLWTYPWTQRYFSSFGNLNSADAVFHNEAVAAHGEKVVTSIGEAIKHMDDIKGYYAQLSKYHSETLHVDPLNFKRFGGCLSIALARHFHEEYTPELHAAYEHLFDAIADALGKGYH'
        assert str(dest) == 'LTDAEKAAVSGLWGKVNSDEVGGEALGRLLVVYPWTQRYFDSFGDLSSASAIMGNAKVKAHGKKVITAFNEGLNHLDSLKGTFASLSELHCDKLHVDPENFRLLGNMIVIVLGHHLGKDFTPAAQAAFQKVMAGVATALAHKYH'
        # ...and the untranslated DNA interval the hit maps back to:
        assert str(dest.sequence) == 'CTGACTGATGCTGAGAAGGCTGCTGTCTCTGGCCTGTGGGGAAAGGTGAACTCCGATGAAGTTGGTGGTGAGGCCCTGGGCAGGCTGCTGGTTGTCTACCCTTGGACCCAGAGGTACTTTGATAGCTTTGGAGACCTATCCTCTGCCTCTGCTATCATGGGTAATGCCAAAGTGAAGGCCCATGGCAAGAAAGTGATAACTGCCTTTAACGAGGGCCTGAATCACTTGGACAGCCTCAAGGGCACCTTTGCCAGCCTCAGTGAGCTCCACTGTGACAAGCTCCATGTGGATCCTGAGAACTTCAGGCTCCTGGGCAATATGATCGTGATTGTGCTGGGCCACCACCTGGGCAAGGATTTCACCCCCGCTGCACAGGCTGCCTTCCAGAAGGTGATGGCTGGAGTGGCCACTGCCCTGGCTCACAAGTACCAC'
        assert approximate_cmp([[edge.pIdentity()]], [[0.451]],
                               0.001)==0
        # Passing DNA to a protein-keyed BlastMapping must be rejected:
        blastmap = blast.BlastMapping(prot)
        try:
            results = blastmap[dna['gi|171854975|dbj|AB364477.1|']]
            raise AssertionError('failed to trap blastx in BlastMapping')
        except ValueError:
            pass
    def bad_subject_test(self):
        # BlastHitParser must still recover the correct interval coordinates
        # from a report with a malformed subject line ('bad_tblastn.txt').
        from pygr import parse_blast
        from pygr.nlmsa_utils import CoordsGroupStart,CoordsGroupEnd
        correctCoords = ((12,63,99508,99661),
                         (65,96,99661,99754),
                         (96,108,99778,99814),
                         (108,181,99826,100045))
        ifile = file('bad_tblastn.txt')
        try:
            p = parse_blast.BlastHitParser()
            it = iter(correctCoords)
            for ival in p.parse_file(ifile):
                # Skip the group start/end markers; compare only intervals.
                if not isinstance(ival,(CoordsGroupStart,
                                        CoordsGroupEnd)):
                    assert (ival.src_start,ival.src_end,
                            ival.dest_start,ival.dest_end) \
                           == it.next()
        finally:
            ifile.close()
if __name__ == '__main__':
    # Build and persist the all-vs-all baseline used by the pickle tests.
    a=all_v_all_blast_save()
| |
"""Testing methods that normally need Handle server read access,
by patching the get request to replace read access."""
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import json
import mock
from mock import patch
sys.path.append("../..")
from b2handle.handleclient import EUDATHandleClient
from b2handle.clientcredentials import PIDClientCredentials
from b2handle.handleexceptions import HandleAlreadyExistsException
from b2handle.handleexceptions import HandleAuthenticationError
from b2handle.handleexceptions import HandleNotFoundException
from b2handle.handleexceptions import BrokenHandleRecordException
from b2handle.handleexceptions import IllegalOperationException
from b2handle.handleexceptions import ReverseLookupException
from b2handle.handleexceptions import GenericHandleError
from mockresponses import MockResponse, MockSearchResponse
from utilities import failure_message, replace_timestamps, sort_lists
class EUDATHandleClientWriteaccessPatchedTestCase(unittest.TestCase):
'''Testing methods with write access (patched server access).
The tests work by intercepting all HTTP put requests and comparing their payload to
the payload of successful real put requests from previous integration tests.
The payloads from previous tests were collected by a logger in the integration
tests (look for REQUESTLOGGER in the write-integration test code). Of course,
the names of the handles have to be adapted in there.
Comparison is done by Python dictionary comparison, which ignores
the order of the record entries, whitespace, string separators and
whether keys are unicode strings or normal strings.
The timestamps should not be compared, so they should be removed. For this,
there is a method "replace_timestamps".
'''
@patch('b2handle.handleclient.EUDATHandleClient.check_if_username_exists')
def setUp(self, username_check_patch):
    """Create the write-access client with username verification mocked out.

    The decorator injects a mock for the (network-bound) username-existence
    check; we configure that injected mock to report success.
    """
    # BUGFIX: the original rebound ``username_check_patch`` to a fresh
    # mock.Mock() (discarding the injected patch) and set ``response_value``,
    # a typo for ``return_value`` -- so the patch was never configured.
    username_check_patch.return_value = True
    # Create a client instance for write access:
    self.inst = EUDATHandleClient.instantiate_with_username_and_password('http://handle.server', '999:user/name', 'apassword')
def tearDown(self):
    """No per-test cleanup needed; @patch decorators undo the mocks."""
    # Removed a duplicate, dead ``pass`` statement from the original.
    pass
def get_payload_headers_from_mockresponse(self, putpatch):
    """Return (payload, headers) from the most recent call recorded on the
    patched ``requests.put`` mock; timestamps in the payload are normalized
    via ``replace_timestamps`` so they can be compared across runs.

    See http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
    """
    # Keyword arguments of the last recorded call:
    last_call_kwargs = putpatch.call_args_list[-1][1]
    payload = json.loads(last_call_kwargs['data'])
    replace_timestamps(payload)
    headers = last_call_kwargs['headers']
    return payload, headers
# register_handle
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_register_handle(self, getpatch, putpatch):
    """Test registering a new handle with various types of values."""
    # Define the replacement for the patched GET method:
    # The handle does not exist yet, so a response with 404
    mock_response_get = MockResponse(notfound=True)
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse(wascreated=True)
    putpatch.return_value = mock_response_put
    # Run the code to be tested:
    testhandle = 'my/testhandle'
    testlocation = 'http://foo.bar'
    testchecksum = '123456'
    additional_URLs = ['http://bar.bar', 'http://foo.foo']
    # Arbitrary keyword args (foo, bar) become custom record entries.
    handle_returned = self.inst.register_handle(testhandle,
                                                location=testlocation,
                                                checksum=testchecksum,
                                                additional_URLs=additional_URLs,
                                                foo='foo',
                                                bar='bar')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload+headers passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload (HS_ADMIN derived from the username's
    # prefix; additional URLs folded into one 10320/LOC entry):
    expected_payload = {"values": [{"index": 100, "type": "HS_ADMIN", "data": {"value": {"index": "200", "handle": "0.NA/my", "permissions": "011111110011"}, "format": "admin"}}, {"index": 1, "type": "URL", "data": "http://foo.bar"}, {"index": 2, "type": "CHECKSUM", "data": "123456"}, {"index": 3, "type": "foo", "data": "foo"}, {"index": 4, "type": "bar", "data": "bar"}, {"index": 5, "type": "10320/LOC", "data": "<locations><location href=\"http://bar.bar\" id=\"0\" /><location href=\"http://foo.foo\" id=\"1\" /></locations>"}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload, passed=passed_payload, methodname='register_handle'))
@patch('b2handle.handleclient.EUDATHandleClient.check_if_username_exists')
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_register_handle_different_owner(self, getpatch, putpatch, username_check_patch):
    """Test registering a new handle whose HS_ADMIN names a different owner."""
    # Define the replacement for the patched GET method:
    # The handle does not exist yet, so a response with 404
    mock_response_get = MockResponse(notfound=True)
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse(wascreated=True)
    putpatch.return_value = mock_response_put
    # BUGFIX: configure the injected username-check mock instead of rebinding
    # it to a new Mock, and use ``return_value`` (the original set the
    # meaningless ``response_value`` attribute, leaving it unconfigured).
    username_check_patch.return_value = True
    # Make another connector, to add the handle owner:
    cred = PIDClientCredentials('http://handle.server',
                                '999:user/name',
                                'apassword',
                                'myprefix',
                                '300:handle/owner')
    newInst = EUDATHandleClient.instantiate_with_credentials(cred)
    # Run the code to be tested:
    testhandle = 'my/testhandle'
    testlocation = 'http://foo.bar'
    testchecksum = '123456'
    additional_URLs = ['http://bar.bar', 'http://foo.foo']
    handle_returned = newInst.register_handle(testhandle,
                                              location=testlocation,
                                              checksum=testchecksum,
                                              additional_URLs=additional_URLs,
                                              foo='foo',
                                              bar='bar')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload+headers passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload -- HS_ADMIN must carry the '300:handle/owner'
    # identity from the credentials, not the username's default:
    expected_payload = {"values": [{"index": 100, "type": "HS_ADMIN", "data": {"value": {"index": "300", "handle": "handle/owner", "permissions": "011111110011"}, "format": "admin"}}, {"index": 1, "type": "URL", "data": "http://foo.bar"}, {"index": 2, "type": "CHECKSUM", "data": "123456"}, {"index": 3, "type": "foo", "data": "foo"}, {"index": 4, "type": "bar", "data": "bar"}, {"index": 5, "type": "10320/LOC", "data": "<locations><location href=\"http://bar.bar\" id=\"0\" /><location href=\"http://foo.foo\" id=\"1\" /></locations>"}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload, passed=passed_payload, methodname='register_handle'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_register_handle_already_exists(self, getpatch, putpatch):
    """Registering over an existing handle with overwrite=False must fail."""
    # The patched GET reports the handle as already existing:
    getpatch.return_value = MockResponse(success=True)
    # The client must refuse to overwrite and raise:
    with self.assertRaises(HandleAlreadyExistsException):
        self.inst.register_handle('my/testhandle',
                                  'http://foo.foo',
                                  test1='I am just an illusion.',
                                  overwrite=False)
    # Nothing may have been written -- no PUT call at all:
    self.assertEqual(putpatch.call_count, 0,
        'The method "requests.put" was called! ('+str(putpatch.call_count)+' times). It should NOT have been called.')
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_register_handle_already_exists_overwrite(self, getpatch, putpatch):
    """Test registering an existing handle with various types of values, with overwrite=True."""
    # Define the replacement for the patched GET method:
    mock_response_get = MockResponse(success=True)
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse(wascreated=True)
    putpatch.return_value = mock_response_put
    # Run the method to be tested:
    testhandle = 'my/testhandle'
    testlocation = 'http://foo.bar'
    testchecksum = '123456'
    overwrite = True
    additional_URLs = ['http://bar.bar', 'http://foo.foo']
    handle_returned = self.inst.register_handle(testhandle,
                                                location=testlocation,
                                                checksum=testchecksum,
                                                additional_URLs=additional_URLs,
                                                overwrite=overwrite,
                                                foo='foo',
                                                bar='bar')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload+headers passed to "requests.put"
    passed_payload, passed_headers = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload:
    expected_payload = {"values": [{"index": 100, "type": "HS_ADMIN", "data": {"value": {"index": "200", "handle": "0.NA/my", "permissions": "011111110011"}, "format": "admin"}}, {"index": 1, "type": "URL", "data": "http://foo.bar"}, {"index": 2, "type": "CHECKSUM", "data": "123456"}, {"index": 3, "type": "foo", "data": "foo"}, {"index": 4, "type": "bar", "data": "bar"}, {"index": 5, "type": "10320/LOC", "data": "<locations><location href=\"http://bar.bar\" id=\"0\" /><location href=\"http://foo.foo\" id=\"1\" /></locations>"}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload, passed=passed_payload, methodname='register_handle'))
    # Check if requests.put received an authorization header:
    self.assertIn('Authorization', passed_headers,
        'Authorization header not passed: '+str(passed_headers))
# generate_and_register_handle
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_generate_and_register_handle(self, getpatch, putpatch):
    """Test generating and registering a new handle."""
    # Define the replacement for the patched GET method:
    mock_response_get = MockResponse(notfound=True)
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse(wascreated=True)
    putpatch.return_value = mock_response_put
    # Run the method to be tested (handle suffix is generated client-side):
    testlocation = 'http://foo.bar'
    testchecksum = '123456'
    handle_returned = self.inst.generate_and_register_handle(
        prefix='my',
        location=testlocation,
        checksum=testchecksum)
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload+headers passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload:
    expected_payload = {"values": [{"index": 100, "type": "HS_ADMIN", "data": {"value": {"index": "200", "handle": "0.NA/my", "permissions": "011111110011"}, "format": "admin"}}, {"index": 1, "type": "URL", "data": "http://foo.bar"}, {"index": 2, "type": "CHECKSUM", "data": "123456"}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload, passed=passed_payload, methodname='generate_and_register_handle'))
# modify_handle_value
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_one(self, getpatch, putpatch):
    """Test modifying one existing handle value."""
    # Define the replacement for the patched GET method
    # (existing record with entries test1..test4):
    cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":333,"type":"test3","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    cont = {"responseCode":1,"handle":"my/testhandle"}
    mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
    putpatch.return_value = mock_response_put
    # Run the method to be tested:
    testhandle = 'my/testhandle'
    self.inst.modify_handle_value(testhandle, test4='newvalue')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload: test4 replaced, other entries unchanged
    # (timestamps are normalized by replace_timestamps before comparison):
    expected_payload = {"values": [{"index": 4, "ttl": 86400, "type": "test4", "data": "newvalue"}, {"index": 111, "ttl": 86400, "type": "test1", "timestamp": "2015-09-30T13:57:03Z", "data": {"value": "val1", "format": "string"}}, {"index": 2222, "ttl": 86400, "type": "test2", "timestamp": "2015-09-30T13:57:03Z", "data": {"value": "val2", "format": "string"}}, {"index": 333, "ttl": 86400, "type": "test3", "timestamp": "2015-09-30T13:57:03Z", "data": {"value": "val3", "format": "string"}}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload,
                        passed=passed_payload,
                        methodname='modify_handle_value'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_several(self, getpatch, putpatch):
    """Test modifying several existing handle values."""
    # Define the replacement for the patched GET method
    # (existing record with entries test1..test4):
    cont = {
        "responseCode":1,
        "handle":"my/testhandle",
        "values":[
        {
            "index":111,
            "type":"test1",
            "data":{
                "format":"string",
                "value":"val1"
            },
            "ttl":86400,
            "timestamp":"2015-09-29T15:51:08Z"
        },{
            "index":2222,
            "type":"test2",
            "data":{
                "format":"string",
                "value":"val2"
            },
            "ttl":86400,
            "timestamp":"2015-09-29T15:51:08Z"
        },{
            "index":333,
            "type":"test3",
            "data":{
                "format":"string",
                "value":"val3"
            },
            "ttl":86400,
            "timestamp":"2015-09-29T15:51:08Z"
        },{
            "index":4,
            "type":"test4",
            "data":{
                "format":"string",
                "value":"val4"
            },
            "ttl":86400,
            "timestamp":"2015-09-29T15:51:08Z"
        }]
    }
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse()
    putpatch.return_value = mock_response_put
    # Test variables
    testhandle = 'my/testhandle'
    # Run the method to be tested (replace three of the four entries):
    self.inst.modify_handle_value(testhandle,
                                  test4='new4',
                                  test2='new2',
                                  test3='new3')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # sort_lists makes the comparison independent of entry order:
    sort_lists(passed_payload)
    # Compare with expected payload: test2/test3/test4 replaced (keeping
    # index and ttl), test1 untouched:
    expected_payload = {
        "values":[
        {
            "index":111,
            "type":"test1",
            "data":{
                "format":"string",
                "value":"val1"
            },
            "ttl":86400,
            "timestamp":"2015-09-29T15:51:08Z"
        },{
            "index":2222,
            "type":"test2",
            "data":"new2",
            "ttl":86400,
        },{
            "index":333,
            "type":"test3",
            "data":"new3",
            "ttl":86400,
        },{
            "index":4,
            "type":"test4",
            "data":"new4",
            "ttl":86400,
        }]
    }
    replace_timestamps(expected_payload)
    sort_lists(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload,
                        passed=passed_payload,
                        methodname='modify_handle_value'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_corrupted(self, getpatch, putpatch):
    """Test exception when trying to modify corrupted handle record."""
    # Define the replacement for the patched GET method (getting a corrupted
    # record -- note two entries share the type "test2"):
    cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":333,"type":"test2","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    cont = {"responseCode":1,"handle":"my/testhandle"}
    mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
    putpatch.return_value = mock_response_put
    # Call the method to be tested: Modifying corrupted raises exception:
    with self.assertRaises(BrokenHandleRecordException):
        self.inst.modify_handle_value('my/testhandle',
                                      test4='new4',
                                      test2='new2',
                                      test3='new3')
    # Check if PUT was called (PUT should not have been called):
    self.assertEqual(putpatch.call_count, 0,
        'The method "requests.put" was called! ('+str(putpatch.call_count)+' times). It should NOT have been called.')
@patch('b2handle.handleclient.requests.delete')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_without_authentication(self, getpatch, deletepatch):
    """Test if exception when not authenticated.

    A read-only client (no credentials) must raise
    HandleAuthenticationError on modify_handle_value.
    """
    # Define the replacement for the patched GET method:
    cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":333,"type":"test3","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.delete method.
    # (FIX: the decorator patches requests.delete, but the original named the
    # parameter and locals 'putpatch'/'mock_response_put' -- renamed to match.)
    mock_response_del = MockResponse()
    deletepatch.return_value = mock_response_del
    # Test variables: a client without write credentials
    inst_readonly = EUDATHandleClient('http://foo.com', HTTP_verify=True)
    testhandle = 'my/testhandle'
    # Run code to be tested and check exception:
    with self.assertRaises(HandleAuthenticationError):
        inst_readonly.modify_handle_value(testhandle, foo='bar')
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_several_inexistent(self, getpatch, putpatch):
    """Test modifying several existing handle values, one of them inexistent."""
    # Define the replacement for the patched GET method:
    cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":333,"type":"test3","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse()
    putpatch.return_value = mock_response_put
    # Test variables
    testhandle = 'my/testhandle'
    # Run the method to be tested (test100 does not exist in the record):
    self.inst.modify_handle_value(testhandle,
                                  test4='new4',
                                  test2='new2',
                                  test100='new100')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload: the inexistent test100 is created at the
    # first free index (2); existing entries keep their indices:
    expected_payload = {"values": [{"index": 2, "type": "test100", "data": "new100"}, {"index": 2222, "ttl": 86400, "type": "test2", "data": "new2"}, {"index": 4, "ttl": 86400, "type": "test4", "data": "new4"}, {"index": 111, "ttl": 86400, "type": "test1", "timestamp": "2015-09-30T20:38:59Z", "data": {"value": "val1", "format": "string"}}, {"index": 333, "ttl": 86400, "type": "test3", "timestamp": "2015-09-30T20:38:59Z", "data": {"value": "val3", "format": "string"}}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload,
                        passed=passed_payload,
                        methodname='modify_handle_value'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_several_inexistent_2(self, getpatch, putpatch):
    """Test modifying several existing handle values, SEVERAL of them inexistent."""
    # Define the replacement for the patched GET method:
    cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":333,"type":"test3","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.put method:
    mock_response_put = MockResponse()
    putpatch.return_value = mock_response_put
    # Test variables
    testhandle = 'my/testhandle'
    # Run the method to be tested (test100 and test101 do not exist yet):
    self.inst.modify_handle_value(testhandle,
                                  test4='new4',
                                  test2='new2',
                                  test100='new100',
                                  test101='new101')
    # Check if the PUT request was sent exactly once:
    self.assertEqual(putpatch.call_count, 1,
        'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
    # Get the payload passed to "requests.put"
    passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
    # Compare with expected payload: both new entries get the next free
    # indices (2 and 3); existing entries keep their indices:
    expected_payload = {"values": [{"index": 2, "type": "test101", "data": "new101"},{"index": 3, "type": "test100", "data": "new100"}, {"index": 2222, "ttl": 86400, "type": "test2", "data": "new2"}, {"index": 4, "ttl": 86400, "type": "test4", "data": "new4"}, {"index": 111, "ttl": 86400, "type": "test1", "timestamp": "2015-09-30T20:38:59Z", "data": {"value": "val1", "format": "string"}}, {"index": 333, "ttl": 86400, "type": "test3", "timestamp": "2015-09-30T20:38:59Z", "data": {"value": "val3", "format": "string"}}]}
    replace_timestamps(expected_payload)
    self.assertEqual(passed_payload, expected_payload,
        failure_message(expected=expected_payload,
                        passed=passed_payload,
                        methodname='modify_handle_value'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_modify_handle_value_HS_ADMIN(self, getpatch, putpatch):
    """Touching the HS_ADMIN entry must raise IllegalOperationException."""
    # The patched GET returns an ordinary four-entry record:
    record = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":333,"type":"test3","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-29T15:51:08Z"}]}
    getpatch.return_value = MockResponse(status_code=200, content=json.dumps(record))
    # The patched PUT would accept anything -- it must never be reached:
    putpatch.return_value = MockResponse()
    # Attempting to modify HS_ADMIN through modify_handle_value is forbidden:
    testhandle = 'my/testhandle'
    with self.assertRaises(IllegalOperationException):
        self.inst.modify_handle_value(testhandle, HS_ADMIN='please let me in!')
# delete_handle_value:
@patch('b2handle.handleclient.requests.delete')
@patch('b2handle.handleclient.requests.get')
def test_delete_handle_value_one_entry(self, getpatch, deletepatch):
    """Test deleting one entry from a record."""
    # Define the replacement for the patched GET method
    # (entry 'test1' lives at index 111):
    cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":333,"type":"test2","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.delete method:
    mock_response_del = MockResponse()
    deletepatch.return_value = mock_response_del
    # Call the method to be tested:
    self.inst.delete_handle_value('my/testhandle', 'test1')
    # Get the args passed to "requests.delete"
    # For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
    positional_args_passed_to_delete = deletepatch.call_args_list[len(deletepatch.call_args_list)-1][0]
    passed_url = positional_args_passed_to_delete[0]
    # Compare with expected URL: the delete must target index 111.
    self.assertIn('?index=111',passed_url,
        'The index 111 is not specified in the URL '+passed_url+'. This is serious!')
@patch('b2handle.handleclient.requests.delete')
@patch('b2handle.handleclient.requests.get')
def test_delete_handle_value_several_entries(self, getpatch, deletepatch):
    """Test deleting several entries from a record.

    'test1' lives at index 111 and 'test2' at indices 2222 and 333; the
    DELETE URL must name the indices of the deleted entries.
    """
    # Test variables
    testhandle = 'my/testhandle'
    # Define the replacement for the patched GET method:
    cont = {"responseCode":1,"handle":testhandle,"values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":333,"type":"test2","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"}]}
    mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
    getpatch.return_value = mock_response_get
    # Define the replacement for the patched requests.delete method:
    mock_response_del = MockResponse()
    deletepatch.return_value = mock_response_del
    # Call the method to be tested:
    self.inst.delete_handle_value(testhandle, ['test1', 'test2'])
    # Get the args passed to "requests.delete"
    # For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
    positional_args_passed_to_delete = deletepatch.call_args_list[len(deletepatch.call_args_list)-1][0]
    passed_url = positional_args_passed_to_delete[0]
    # Compare with expected URL:
    self.assertIn('index=111',passed_url,
        'The index 111 is not specified in the URL '+passed_url+'. This may be serious!')
    # BUGFIX: the original checked for 'index=222', which is merely a
    # substring of the intended 'index=2222' -- assert the full index:
    self.assertIn('index=2222',passed_url,
        'The index 2222 is not specified in the URL '+passed_url+'. This may be serious!')
@patch('b2handle.handleclient.requests.delete')
@patch('b2handle.handleclient.requests.get')
def test_delete_handle_value_inexistent_entry(self, getpatch, deletepatch):
"""Test deleting one inexistent entry from a record."""
# Test variables
testhandle = 'my/testhandle'
# Define the replacement for the patched GET method:
cont = {"responseCode":1,"handle":testhandle,"values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":333,"type":"test2","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"}]}
mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
getpatch.return_value = mock_response_get
# Define the replacement for the patched requests.delete method:
mock_response_del = MockResponse()
deletepatch.return_value = mock_response_del
# Call the method to be tested:
self.inst.delete_handle_value(testhandle, 'test100')
# Check if PUT was called (PUT should not have been called):
self.assertEqual(deletepatch.call_count, 0,
'The method "requests.put" was called! ('+str(deletepatch.call_count)+' times). It should NOT have been called.')
    @patch('b2handle.handleclient.requests.delete')
    @patch('b2handle.handleclient.requests.get')
    def test_delete_handle_value_several_entries_one_nonexistent(self, getpatch, deletepatch):
        """Test deleting several entries from a record, one of them does not exist.

        Only the existing entry ('test1', index 111) may appear in the DELETE
        URL; the nonexistent 'test100' must not add a second index parameter.
        """
        # Test variables
        testhandle = 'my/testhandle'
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":testhandle,"values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":333,"type":"test2","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.delete method:
        mock_response_del = MockResponse()
        deletepatch.return_value = mock_response_del
        # Call the method to be tested:
        self.inst.delete_handle_value(testhandle, ['test1', 'test100'])
        # Get the args passed to "requests.delete"
        # For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
        positional_args_passed_to_delete = deletepatch.call_args_list[len(deletepatch.call_args_list)-1][0]
        passed_url = positional_args_passed_to_delete[0]
        # Compare with expected URL:
        self.assertIn('index=111',passed_url,
            'The index 111 is not specified in the URL '+passed_url+'. This may be serious!')
        self.assertNotIn('&index=',passed_url,
            'A second index was specified in the URL '+passed_url+'. This may be serious!')
@patch('b2handle.handleclient.requests.delete')
@patch('b2handle.handleclient.requests.get')
def test_delete_handle_value_several_occurrences(self, getpatch, deletepatch):
"""Test trying to delete from a corrupted handle record."""
# Define the replacement for the patched GET method (getting a corrupted record):
cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":111,"type":"test1","data":{"format":"string","value":"val1"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":2222,"type":"test2","data":{"format":"string","value":"val2"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":333,"type":"test2","data":{"format":"string","value":"val3"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"},{"index":4,"type":"test4","data":{"format":"string","value":"val4"},"ttl":86400,"timestamp":"2015-09-30T15:08:49Z"}]}
mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
getpatch.return_value = mock_response_get
# Define the replacement for the patched requests.delete method:
mock_response_del = MockResponse()
deletepatch.return_value = mock_response_del
# Call the method to be tested:
self.inst.delete_handle_value('my/testhandle', 'test2')
# Get the args passed to "requests.delete"
# For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
positional_args_passed_to_delete = deletepatch.call_args_list[len(deletepatch.call_args_list)-1][0]
passed_url = positional_args_passed_to_delete[0]
# Compare with expected URL:
self.assertIn('index=2222',passed_url,
'The index 2222 is not specified in the URL '+passed_url+'. This may be serious!')
self.assertIn('index=333',passed_url,
'The index 333 is not specified in the URL '+passed_url+'. This may be serious!')
# Check if PUT was called once:
self.assertEqual(deletepatch.call_count, 1,
'The method "requests.put" was not called once, but '+str(deletepatch.call_count)+' times.')
# delete_handle:
@patch('b2handle.handleclient.requests.delete')
def test_delete_handle(self, deletepatch):
# Define the replacement for the patched requests.delete method:
mock_response_del = MockResponse(success=True)
deletepatch.return_value = mock_response_del
# Call method to be tested:
self.inst.delete_handle('my/testhandle')
# Get the args passed to "requests.delete"
# For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
positional_args_passed_to_delete = deletepatch.call_args_list[len(deletepatch.call_args_list)-1][0]
passed_url = positional_args_passed_to_delete[0]
# Compare with expected URL:
self.assertNotIn('index=',passed_url,
'Indices were passed to the delete method.')
@patch('b2handle.handleclient.requests.delete')
@patch('b2handle.handleclient.requests.get')
def test_delete_handle_inexistent(self, getpatch, deletepatch):
# Define the replacement for the patched GET method:
mock_response_get = MockResponse(notfound=True)
getpatch.return_value = mock_response_get
# Define the replacement for the patched requests.delete method:
mock_response_del = MockResponse(notfound=True)
deletepatch.return_value = mock_response_del
# Call method to be tested:
resp = self.inst.delete_handle('my/testhandle')
# Check if response is ok:
self.assertIsNone(resp,
'The response code when deleting inexistent handle should be None but is: '+str(resp))
def test_delete_handle_too_many_args(self):
# Call method to be tested:
with self.assertRaises(TypeError):
self.inst.delete_handle('my/testhandle', 'test1')
    # 10320/LOC
    # remove_additional_URL
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_remove_additional_URL(self, getpatch, putpatch):
        """Test normal removal of additional URL from 10320/LOC.

        Removing 'http://first.foo' from a 10320/LOC entry holding two
        locations must leave a single PUT whose payload keeps only
        'http://second.foo'.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        testhandle = 'my/testhandle'
        url = 'http://first.foo'
        self.inst.remove_additional_URL(testhandle, url)
        # Check if the PUT request was sent exactly once:
        self.assertEqual(putpatch.call_count, 1,
            'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
        # Get the payload passed to "requests.put"
        # For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload:
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:32Z", "data": {"value": "www.url.foo", "format": "string"}}, {"index": 2, "ttl": 86400, "type": "10320/LOC", "timestamp": "2015-09-30T15:54:32Z", "data": {"value": "<locations><location href=\"http://second.foo\" /></locations>", "format": "string"}}]}
        # Timestamps are normalized so the comparison ignores clock values:
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload,
                                 passed=passed_payload,
                                 methodname='remove_additional_URL'))
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_remove_additional_URL_toempty(self, getpatch, putpatch):
        """Test removing all URL, which should remove the whole 10320/LOC attribute.

        The mocked record holds a single location; removing it must produce a
        PUT payload without any 10320/LOC entry at all.
        """
        # Define the replacement for the patched GET method (a record with one additional URL in it):
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:33Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href=\"http://second.foo\" /></locations>"},"ttl":86400,"timestamp":"2015-09-30T15:54:33Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        testhandle = 'my/testhandle'
        url2 = 'http://second.foo'
        self.inst.remove_additional_URL(testhandle, url2)
        # Check if the PUT request was sent exactly once:
        self.assertEqual(putpatch.call_count, 1,
            'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
        # Get the payload passed to "requests.put"
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload (note: no 10320/LOC entry remains):
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:33Z", "data": {"value": "www.url.foo", "format": "string"}}]}
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload,
                                 passed=passed_payload,
                                 methodname='remove_additional_URL'))
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_remove_additional_URL_several(self, getpatch, putpatch):
        """Test removing all URL at the same time, which should remove the whole 10320/LOC attribute.

        Both locations are removed in one call; the PUT payload must contain
        no 10320/LOC entry.
        """
        # Test variables
        testhandle = 'my/testhandle'
        url1 = 'http://first.foo'
        url2 = 'http://second.foo'
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":testhandle,"values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":testhandle}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run code to be tested (both URLs passed as varargs):
        self.inst.remove_additional_URL(testhandle, url1, url2)
        # Check if the PUT request was sent exactly once:
        self.assertEqual(putpatch.call_count, 1,
            'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
        # Get the payload passed to "requests.put"
        # For help, please see: http://www.voidspace.org.uk/python/mock/examples.html#checking-multiple-calls-with-mock
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload (note: no 10320/LOC entry remains):
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:32Z", "data": {"value": "www.url.foo", "format": "string"}}]}
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload,
                                 passed=passed_payload,
                                 methodname='remove_additional_URL'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_remove_additional_URL_inexistent_handle(self, getpatch, putpatch):
"""Test normal removal of additional URL from an inexistent handle."""
# Test variables
testhandle = 'my/testhandle'
url = 'http://first.foo'
# Define the replacement for the patched GET method:
mock_response_get = MockResponse(notfound=True)
getpatch.return_value = mock_response_get
# Define the replacement for the patched requests.put method:
mock_response_put = MockResponse(notfound=True)
putpatch.return_value = mock_response_put
# Run code to be tested + check exception:
with self.assertRaises(HandleNotFoundException):
self.inst.remove_additional_URL(testhandle, url)
    # exchange_additional_URL
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_exchange_additional_URL_normal(self, getpatch, putpatch):
        """Test replacing an URL.

        'http://first.foo' is swapped for 'http://newfirst.foo' inside the
        10320/LOC entry; exactly one PUT with the rewritten locations XML
        must be sent.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        old = 'http://first.foo'
        new = 'http://newfirst.foo'
        self.inst.exchange_additional_URL(
            'my/testhandle',
            old, new)
        # Check if the PUT request was sent exactly once:
        self.assertEqual(putpatch.call_count, 1,
            'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
        # Get the payload passed to "requests.put"
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload:
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:32Z", "data": {"value": "www.url.foo", "format": "string"}}, {"index": 2, "ttl": 86400, "type": "10320/LOC", "timestamp": "2015-09-30T15:54:32Z", "data": {"value": "<locations><location href=\"http://newfirst.foo\" /><location href=\"http://second.foo\" /></locations>", "format": "string"}}]}
        # Timestamps are normalized so the comparison ignores clock values:
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload,
                                 passed=passed_payload,
                                 methodname='exchange_additional_URL'))
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_exchange_additional_URL_doesnotexist(self, getpatch, putpatch):
        """Test if replacing an inexistent URL has any effect.

        The old URL is not present in the 10320/LOC entry, so no PUT may
        be issued at all.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        inexistent_old = 'http://sodohfasdkfjhanwikfhbawkedfhbawe.foo'
        new = 'http://newfirst.foo'
        self.inst.exchange_additional_URL(
            'my/testhandle',
            inexistent_old, new)
        # Check if the PUT request was sent (it must not have been):
        self.assertEqual(putpatch.call_count, 0,
            'The method "requests.put" was called '+str(putpatch.call_count)+' times - it should not be called at all.')
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_exchange_additional_URL_no10320loc(self, getpatch, putpatch):
        """Test if replacing an URL has any effect if there is no 10320/LOC.

        The mocked record has no 10320/LOC entry at all, so the exchange is
        a no-op and no PUT may be issued.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        old = 'http://first.foo'
        new = 'http://newfirst.foo'
        self.inst.exchange_additional_URL(
            'my/testhandle',
            old, new)
        # Check if the PUT request was sent (it must not have been):
        self.assertEqual(putpatch.call_count, 0,
            'The method "requests.put" was called '+str(putpatch.call_count)+' times - it should not be called at all.')
    # add_additional_URL
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_add_additional_URL_first(self, getpatch, putpatch):
        """Test adding the first additional URL'(created the 10320/LOC entry).

        The mocked record has no 10320/LOC entry yet; adding a URL must
        create one (index 2) holding a single location element.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:32Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        url = 'http://first.foo'
        self.inst.add_additional_URL('my/testhandle', url)
        # Check if the PUT request was sent exactly once:
        self.assertEqual(putpatch.call_count, 1,
            'The method "requests.put" was not called once, but '+str(putpatch.call_count)+' times.')
        # Get the payload+headers passed to "requests.put"
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload (timestamps are normalized below, so
        # the differing literal timestamp here does not matter):
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:30Z", "data": {"value": "www.url.foo", "format": "string"}}, {"index": 2, "type": "10320/LOC", "data": "<locations><location href=\"http://first.foo\" id=\"0\" /></locations>"}]}
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload, passed=passed_payload, methodname='add_additional_URL'))
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_add_additional_URL_another(self, getpatch, putpatch):
        """Test adding an additional URL.

        A 10320/LOC entry with two locations already exists; the new URL
        must be appended to it as a third location element.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:30Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:30Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        url = 'http://third.foo'
        self.inst.add_additional_URL('my/testhandle', url)
        # Get the payload+headers passed to "requests.put"
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload:
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:30Z", "data": {"value": "www.url.foo", "format": "string"}}, {"index": 2, "ttl": 86400, "type": "10320/LOC", "timestamp": "2015-09-30T15:54:30Z", "data": "<locations><location href=\"http://first.foo\" /><location href=\"http://second.foo\" /><location href=\"http://third.foo\" id=\"0\" /></locations>"}]}
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload, passed=passed_payload, methodname='add_additional_URL'))
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_add_additional_URL_several(self, getpatch, putpatch):
        """Test adding several (3) additional URLs.

        All three URLs must be appended to the existing 10320/LOC entry in
        one PUT, carrying id attributes 0, 1 and 2.
        """
        # Define the replacement for the patched GET method:
        cont = {
            "responseCode":1,"handle":"my/testhandle",
            "values":[
            {
                "index":1,
                "type":"URL",
                "data":{
                    "format":"string",
                    "value":"www.url.foo"
                },"ttl":86400,
                "timestamp":"2015-09-30T15:54:31Z"
            },{
                "index":2,
                "type":"10320/LOC",
                "data":{
                    "format":"string",
                    "value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "
                }, "ttl":86400,"timestamp":"2015-09-30T15:54:31Z"
            }
            ]
        }
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        url1 = 'http://one'
        url2 = 'http://two'
        url3 = 'http://three'
        self.inst.add_additional_URL('my/testhandle', url1, url2, url3)
        # Get the payload+headers passed to "requests.put"
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload:
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:31Z", "data": {"value": "www.url.foo", "format": "string"}}, {"index": 2, "ttl": 86400, "type": "10320/LOC", "timestamp": "2015-09-30T15:54:31Z", "data": "<locations><location href=\"http://first.foo\" /><location href=\"http://second.foo\" /><location href=\"http://one\" id=\"0\" /><location href=\"http://two\" id=\"1\" /><location href=\"http://three\" id=\"2\" /></locations>"}]}
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload, passed=passed_payload, methodname='add_additional_URL'))
@patch('b2handle.handleclient.requests.put')
@patch('b2handle.handleclient.requests.get')
def test_add_additional_URL_to_inexistent_handle(self, getpatch, putpatch):
"""Test exception if handle does not exist."""
# Define the replacement for the patched GET method:
mock_response_get = MockResponse(notfound=True)
getpatch.return_value = mock_response_get
# Define the replacement for the patched requests.put method:
mock_response_put = MockResponse()
putpatch.return_value = mock_response_put
# Run the method to be tested:
url = 'http://first.foo'
with self.assertRaises(HandleNotFoundException):
self.inst.add_additional_URL('my/testhandle', url)
# Check if the PUT request was sent:
self.assertEqual(putpatch.call_count, 0,
'The method "requests.put" was called '+str(putpatch.call_count)+' times - it should not be called at all.')
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_add_additional_URL_alreadythere(self, getpatch, putpatch):
        """Test adding an URL that is already there.

        The URL already exists in the 10320/LOC entry, so the add is a
        no-op and no PUT may be issued.
        """
        # Define the replacement for the patched GET method:
        cont = {"responseCode":1,"handle":"my/testhandle","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:30Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:30Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":"my/testhandle"}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        url = 'http://first.foo'
        self.inst.add_additional_URL('my/testhandle', url)
        # Check that no PUT request was sent (the URL is already present):
        self.assertEqual(putpatch.call_count, 0,
            'The method "requests.put" was called '+str(putpatch.call_count)+' times (should be 0).')
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_GenericHandleError(self, getpatch, putpatch):
        """Test causing a Generic Handle Exception.
        This should never happen, but this exception was designed for the
        really unexpected things, so to make sure it works, I invent a
        very broken illegal action here: the server answers for handle
        'not/me' although 'my/testhandle' was requested.
        """
        # Define the replacement for the patched GET method (handle in the
        # response deliberately mismatches the requested handle):
        cont = {"responseCode":1,"handle":"not/me","values":[{"index":1,"type":"URL","data":{"format":"string","value":"www.url.foo"},"ttl":86400,"timestamp":"2015-09-30T15:54:30Z"},{"index":2,"type":"10320/LOC","data":{"format":"string","value":"<locations><location href = 'http://first.foo' /><location href = 'http://second.foo' /></locations> "},"ttl":86400,"timestamp":"2015-09-30T15:54:30Z"}]}
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        mock_response_put = MockResponse()
        putpatch.return_value = mock_response_put
        # Run the method to be tested:
        with self.assertRaises(GenericHandleError):
            self.inst.retrieve_handle_record_json('my/testhandle')
        # Check that no PUT request was sent at all:
        self.assertEqual(putpatch.call_count, 0,
            'The method "requests.put" was called '+str(putpatch.call_count)+' times. It should not have been called at all.')
    @patch('b2handle.handleclient.requests.put')
    @patch('b2handle.handleclient.requests.get')
    def test_add_additional_URL_several_toempty(self, getpatch, putpatch):
        """Test adding several (3) additional URLs to a handle that has no 10320/LOC.

        A new 10320/LOC entry (index 2) must be created holding all three
        URLs with id attributes 0, 1 and 2.
        """
        # Test variables
        testhandle = 'my/testhandle'
        url1 = 'http://one'
        url2 = 'http://two'
        url3 = 'http://three'
        # Define the replacement for the patched GET method:
        cont = {
            "responseCode":1,
            "handle":testhandle,
            "values":[
            {
                "index":1,
                "type":"URL",
                "data":{
                    "format":"string",
                    "value":"www.url.foo"
                },
                "ttl":86400,
                "timestamp":"2015-09-30T15:54:31Z"
            }
            ]
        }
        mock_response_get = MockResponse(status_code=200, content=json.dumps(cont))
        getpatch.return_value = mock_response_get
        # Define the replacement for the patched requests.put method:
        cont = {"responseCode":1,"handle":testhandle}
        mock_response_put = MockResponse(status_code=200, content=json.dumps(cont))
        putpatch.return_value = mock_response_put
        # Run code to be tested:
        self.inst.add_additional_URL(testhandle, url1, url2, url3)
        # Get the payload+headers passed to "requests.put"
        passed_payload, _ = self.get_payload_headers_from_mockresponse(putpatch)
        # Compare with expected payload (the new 10320/LOC entry has no ttl):
        expected_payload = {"values": [{"index": 1, "ttl": 86400, "type": "URL", "timestamp": "2015-09-30T15:54:31Z", "data": {"value": "www.url.foo", "format": "string"}}, {"index": 2, "type": "10320/LOC", "timestamp": "2015-09-30T15:54:31Z", "data": "<locations><location href=\"http://one\" id=\"0\" /><location href=\"http://two\" id=\"1\" /><location href=\"http://three\" id=\"2\" /></locations>"}]}
        replace_timestamps(expected_payload)
        self.assertEqual(passed_payload, expected_payload,
            failure_message(expected=expected_payload, passed=passed_payload, methodname='add_additional_URL'))
# search_handle
@patch('b2handle.handleclient.requests.get')
@patch('b2handle.handleclient.EUDATHandleClient.check_if_username_exists')
def test_search_handle_wrong_url(self, usernamepatch, getpatch):
"""Test exception when wrong search servlet URL is given."""
# Define the replacement for the patched check_if_username_exists method:
mock_response_user = MockResponse(success=True)
usernamepatch.return_value = mock_response_user
# Define the replacement for the patched GET method:
mock_response_get = MockSearchResponse(wrong_url=True)
getpatch.return_value = mock_response_get
# Setup client for searching with existent but wrong url (google.com):
inst = EUDATHandleClient.instantiate_with_username_and_password(
"url_https",
"100:user/name",
"password",
reverselookup_baseuri='http://www.google.com',
HTTP_verify=True)
# Run code to be tested + check exception:
with self.assertRaises(ReverseLookupException):
self.inst.search_handle(URL='*')
@patch('b2handle.handleclient.requests.get')
@patch('b2handle.handleclient.EUDATHandleClient.check_if_username_exists')
def test_search_handle_handleurl(self, usernamepatch, getpatch):
    """Test exception when wrong search servlet URL (Handle Server REST API URL) is given.

    Bug fix: the search is now performed through the locally configured
    client ``inst`` (whose reverse-lookup URL extension points at the
    Handle Server REST API). Previously the test called
    ``self.inst.search_handle``, so the configuration under test was
    never exercised.
    """
    # Define the replacement for the patched check_if_username_exists method:
    mock_response_user = MockResponse(success=True)
    usernamepatch.return_value = mock_response_user
    # Define the replacement for the patched GET method:
    mock_response_search = MockSearchResponse(handle_url=True)
    getpatch.return_value = mock_response_search
    # Setup client for searching with Handle Server url:
    inst = EUDATHandleClient.instantiate_with_username_and_password(
        "url_https",
        "100:user/name",
        "password",
        reverselookup_url_extension='/api/handles/',
        HTTP_verify=True)
    # Run code to be tested + check exception:
    with self.assertRaises(ReverseLookupException):
        inst.search_handle(URL='*')
@patch('b2handle.handleclient.requests.get')
def test_search_handle(self, getpatch):
    """Test searching for handles with any url (server should return list of handles)."""
    # The patched GET delivers a successful search response:
    getpatch.return_value = MockSearchResponse(success=True)
    # Exercise the search:
    val = self.inst.search_handle(URL='*')
    # A non-empty list of syntactically valid handles must come back:
    self.assertEqual(type(val), type([]),
        '')
    self.assertTrue(len(val) > 0,
        '')
    self.assertTrue(self.inst.check_handle_syntax(val[0]),
        '')
@patch('b2handle.handleclient.requests.get')
def test_search_handle_emptylist(self, getpatch):
    """Test empty search result."""
    # The patched GET delivers a search response with no matches:
    getpatch.return_value = MockSearchResponse(empty=True)
    # Search for a URL that does not exist:
    val = self.inst.search_handle(URL='noturldoesnotexist')
    # An empty list (not None, not an error) must come back:
    self.assertEqual(type(val), type([]),
        '')
    self.assertEqual(len(val), 0,
        '')
@patch('b2handle.handleclient.requests.get')
def test_search_handle_for_url(self, getpatch):
    """Test searching for url with wildcards."""
    # The patched GET delivers a successful search response:
    getpatch.return_value = MockSearchResponse(success=True)
    # Keyword-argument form of the search:
    val = self.inst.search_handle(URL='*dkrz*')
    self.assertEqual(type(val), type([]),
        '')
    # Positional form of the same search:
    val = self.inst.search_handle('*dkrz*')
    self.assertEqual(type(val), type([]),
        '')
if False:
    # At the moment, two keywords can not be searched!
    # NOTE(review): this test is deliberately disabled with a class-body
    # "if False", so the method below is never defined on the test class.
    @patch('b2handle.handleclient.requests.get')
    def test_search_handle_for_url_and_checksum(self, getpatch):
        """Test searching for url and checksum with wildcards."""
        # Define the replacement for the patched GET method:
        mock_response_get = MockSearchResponse(success=True)
        getpatch.return_value = mock_response_get
        # Run code to be tested:
        val = self.inst.search_handle('*dkrz*', CHECKSUM='*123*')
        # Check desired outcome:
        self.assertEqual(type(val),type([]),
            '')
        # Run code to be tested:
        val = self.inst.search_handle(URL='*dkrz*', CHECKSUM='*123*')
        # Check desired outcome:
        self.assertEqual(type(val),type([]),
            '')
@patch('b2handle.handleclient.requests.get')
def test_search_handle_prefixfilter(self, getpatch):
    """Test filtering for prefixes."""
    prefix = "11111"
    # The patched GET yields only handles under the requested prefix:
    getpatch.return_value = MockSearchResponse(prefix=prefix)
    # Exercise the prefix-filtered search:
    val = self.inst.search_handle(URL='*dkrz*', prefix=prefix)
    # A list must come back, and every handle must carry the prefix:
    self.assertEqual(type(val), type([]),
        '')
    for handle in val:
        self.assertEqual(handle.split('/')[0], prefix)
@patch('b2handle.handleclient.requests.get')
def test_search_handle_prefixfilter_realprefix(self, getpatch):
    """Test filtering for prefixes."""
    prefix = "10876.test"
    # The patched GET yields only handles under the requested prefix:
    getpatch.return_value = MockSearchResponse(prefix=prefix)
    # Exercise the prefix-filtered search:
    val = self.inst.search_handle(URL='*dkrz*', prefix=prefix)
    # A list must come back, and every handle must carry the prefix:
    self.assertEqual(type(val), type([]),
        '')
    for handle in val:
        self.assertEqual(handle.split('/')[0], prefix)
@patch('b2handle.handleclient.requests.get')
def test_search_handle_fulltext(self, getpatch):
    """Test that free-text search terms raise a ReverseLookupException.

    (Docstring corrected: the original said "Test filtering for
    prefixes", but the body asserts that passing ``searchterms`` raises.)
    """
    prefix = "10876.test"
    # Define the replacement for the patched GET method:
    mock_response_get = MockSearchResponse(prefix=prefix)
    getpatch.return_value = mock_response_get
    # Run code to be tested + check exception:
    with self.assertRaises(ReverseLookupException):
        self.inst.search_handle(URL='*dkrz*', searchterms=['foo','bar'])
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# License: MIT (see LICENSE file provided)
# vim600: fdm=marker tabstop=4 shiftwidth=4 expandtab ai
# Description {{{
"""
**polib** allows you to manipulate, create, modify gettext files (pot, po
and mo files). You can load existing files, iterate through its entries,
add, modify entries, comments or metadata, etc... or create new po files
from scratch.
**polib** provides a simple and pythonic API, exporting only three
convenience functions (*pofile*, *mofile* and *detect_encoding*), and the
four core classes, *POFile*, *MOFile*, *POEntry* and *MOEntry* for creating
new files/entries.
**Basic example**:
>>> import polib
>>> # load an existing po file
>>> po = polib.pofile('tests/test_utf8.po')
>>> for entry in po:
... # do something with entry...
... pass
>>> # add an entry
>>> entry = polib.POEntry(msgid='Welcome', msgstr='Bienvenue')
>>> entry.occurrences = [('welcome.py', '12'), ('anotherfile.py', '34')]
>>> po.append(entry)
>>> # to save our modified po file:
>>> # po.save()
>>> # or you may want to compile the po file
>>> # po.save_as_mofile('tests/test_utf8.mo')
"""
# }}}
__author__ = 'David JEAN LOUIS <izimobil@gmail.com>'
__version__ = '0.3.1'
# dependencies {{{
try:
import struct
import textwrap
import warnings
except ImportError, exc:
raise ImportError('polib requires python 2.3 or later with the standard' \
' modules "struct", "textwrap" and "warnings" (details: %s)' % exc)
# }}}
# Public API of the module.
__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
           'detect_encoding', 'quote', 'unquote']
# shortcuts for performance improvement {{{
# yes, yes, this is quite ugly but *very* efficient
# Unbound-method/function aliases: e.g. _strstrip(s) saves the per-call
# attribute lookup that s.strip() would perform.
_dictget = dict.get
_listappend = list.append
_listpop = list.pop
_strjoin = str.join
_strsplit = str.split
_strstrip = str.strip
_strreplace = str.replace
_textwrap = textwrap.wrap
# }}}
# Encoding assumed when none can be detected in a po/mo file.
default_encoding = 'utf-8'
def pofile(fpath, **kwargs):
    """
    Convenience function that parses the po/pot file *fpath* and returns
    a POFile instance.
    **Keyword arguments**:
      - *fpath*: string, full or relative path to the po/pot file to parse
      - *wrapwidth*: integer, the wrap width, only useful when -w option was
        passed to xgettext (optional, default to 78)
      - *autodetect_encoding*: boolean, if set to False the function will
        not try to detect the po file encoding (optional, default to True)
      - *encoding*: string, an encoding, only relevant if autodetect_encoding
        is set to False
    **Example**:
    >>> import polib
    >>> po = polib.pofile('tests/test_utf8.po')
    >>> po #doctest: +ELLIPSIS
    <POFile instance at ...>
    >>> import os, tempfile
    >>> for fname in ['test_iso-8859-15.po', 'test_utf8.po']:
    ...     orig_po = polib.pofile('tests/'+fname)
    ...     tmpf = tempfile.NamedTemporaryFile().name
    ...     orig_po.save(tmpf)
    ...     try:
    ...         new_po = polib.pofile(tmpf)
    ...         for old, new in zip(orig_po, new_po):
    ...             if old.msgid != new.msgid:
    ...                 old.msgid
    ...                 new.msgid
    ...             if old.msgstr != new.msgstr:
    ...                 old.msgid
    ...                 new.msgid
    ...     finally:
    ...         os.unlink(tmpf)
    """
    # pofile {{{
    # Pick the encoding: sniff the file unless the caller disabled detection.
    # (Fixed: "== True" comparison replaced by a plain truth test, PEP8 E712.)
    if _dictget(kwargs, 'autodetect_encoding', True):
        enc = detect_encoding(fpath)
    else:
        enc = _dictget(kwargs, 'encoding', default_encoding)
    parser = _POFileParser(fpath)
    instance = parser.parse()
    instance.wrapwidth = _dictget(kwargs, 'wrapwidth', 78)
    instance.encoding = enc
    return instance
    # }}}
def mofile(fpath, **kwargs):
    """
    Convenience function that parses the mo file *fpath* and returns
    a MOFile instance.
    **Keyword arguments**:
      - *fpath*: string, full or relative path to the mo file to parse
      - *wrapwidth*: integer, the wrap width, only useful when -w option was
        passed to xgettext to generate the po file that was used to format
        the mo file (optional, default to 78)
      - *autodetect_encoding*: boolean, if set to False the function will
        not try to detect the po file encoding (optional, default to True)
      - *encoding*: string, an encoding, only relevant if autodetect_encoding
        is set to False
    **Example**:
    >>> import polib
    >>> mo = polib.mofile('tests/test_utf8.mo')
    >>> mo #doctest: +ELLIPSIS
    <MOFile instance at ...>
    >>> import os, tempfile
    >>> for fname in ['test_iso-8859-15.mo', 'test_utf8.mo']:
    ...     orig_mo = polib.mofile('tests/'+fname)
    ...     tmpf = tempfile.NamedTemporaryFile().name
    ...     orig_mo.save(tmpf)
    ...     try:
    ...         new_mo = polib.mofile(tmpf)
    ...         for old, new in zip(orig_mo, new_mo):
    ...             if old.msgid != new.msgid:
    ...                 old.msgstr
    ...                 new.msgstr
    ...     finally:
    ...         os.unlink(tmpf)
    """
    # mofile {{{
    # Pick the encoding: sniff the file unless the caller disabled detection.
    # (Fixed: "== True" comparison replaced by a plain truth test, PEP8 E712.)
    if _dictget(kwargs, 'autodetect_encoding', True):
        enc = detect_encoding(fpath)
    else:
        enc = _dictget(kwargs, 'encoding', default_encoding)
    parser = _MOFileParser(fpath)
    instance = parser.parse()
    instance.wrapwidth = _dictget(kwargs, 'wrapwidth', 78)
    instance.encoding = enc
    return instance
    # }}}
def detect_encoding(fpath):
    """
    Try to detect the encoding used by the file *fpath*. The function will
    return polib default *encoding* if it's unable to detect it.
    **Keyword argument**:
    - *fpath*: string, full or relative path to the mo file to parse.
    **Examples**:
    >>> print detect_encoding('tests/test_noencoding.po')
    utf-8
    >>> print detect_encoding('tests/test_utf8.po')
    UTF-8
    >>> print detect_encoding('tests/test_utf8.mo')
    UTF-8
    >>> print detect_encoding('tests/test_iso-8859-15.po')
    ISO_8859-15
    >>> print detect_encoding('tests/test_iso-8859-15.mo')
    ISO_8859-15
    """
    # detect_encoding {{{
    import re
    # Charset declaration inside the Content-Type metadata line
    # (the leading optional '"' matches the quoted form used in po files).
    rx = re.compile(r'"?Content-Type:.+? charset=([\w_\-:\.]+)')
    f = open(fpath)
    try:
        # First charset declaration found wins.
        for l in f:
            match = rx.search(l)
            if match:
                return _strstrip(match.group(1))
    finally:
        # Robustness fix: guarantee the handle is closed even if reading or
        # matching raises (the original leaked it on exceptions).
        f.close()
    return default_encoding
    # }}}
def quote(st):
    """
    Quote and return the given string *st*.
    **Examples**:
    >>> quote('\\t and \\n and \\r and " and \\\\')
    '\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\'
    """
    # quote {{{
    # The backslash must be escaped first, otherwise the backslashes
    # introduced by the later replacements would be doubled again.
    replacements = (
        ('\\', r'\\'),
        ('\t', r'\t'),
        ('\r', r'\r'),
        ('\n', r'\n'),
        ('\"', r'\"'),
    )
    for raw, escaped in replacements:
        st = _strreplace(st, raw, escaped)
    return st
    # }}}
def unquote(st):
    """
    Unquote and return the given string *st*.
    **Examples**:
    >>> unquote('\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\')
    '\\t and \\n and \\r and " and \\\\'
    >>> unquote('\\\\\\\\n')
    '\\\\n'
    """
    # unquote {{{
    # Bug fix: scan left to right so that an escaped backslash followed by a
    # letter (the three characters \, \, n meaning "backslash then n") is not
    # mistaken for an escaped newline. The previous chained str.replace
    # implementation decoded that sequence as a backslash plus a real newline.
    escapes = {'n': '\n', 'r': '\r', 't': '\t', '"': '"', '\\': '\\'}
    ret = []
    i = 0
    length = len(st)
    while i < length:
        ch = st[i]
        if ch == '\\' and i + 1 < length and st[i+1] in escapes:
            ret.append(escapes[st[i+1]])
            i += 2
        else:
            # Not a recognized escape sequence: keep the character verbatim.
            ret.append(ch)
            i += 1
    return ''.join(ret)
    # }}}
class _BaseFile(list):
    """
    Common parent class for POFile and MOFile classes.
    This class must **not** be instantiated directly.

    Instances behave like lists of entries; file-level state (path,
    wrap width, encoding, header and metadata) lives in attributes.
    """
    # class _BaseFile {{{

    def __init__(self, fpath=None, wrapwidth=78, encoding=default_encoding):
        """
        Constructor.
        **Keyword arguments**:
        - *fpath*: string, path to po or mo file
        - *wrapwidth*: integer, the wrap width, only useful when -w option
          was passed to xgettext to generate the po file that was used to
          format the mo file, default to 78 (optional).
        - *encoding*: string, the file encoding (defaults to the module
          level default_encoding).
        """
        list.__init__(self)
        # the opened file handle
        self.fpath = fpath
        # the width at which lines should be wrapped
        self.wrapwidth = wrapwidth
        # the file encoding
        self.encoding = encoding
        # header
        self.header = ''
        # both po and mo files have metadata
        self.metadata = {}
        self.metadata_is_fuzzy = 0

    def __str__(self):
        """String representation of the file.

        Renders the metadata entry first, then active entries, then
        obsolete entries, joined by blank lines.
        """
        ret = []
        entries = [self.metadata_as_entry()] + \
                  [e for e in self if not e.obsolete]
        for entry in entries:
            _listappend(ret, entry.__str__(self.wrapwidth))
        # obsolete entries always come last
        for entry in self.obsolete_entries():
            _listappend(ret, entry.__str__(self.wrapwidth))
        return _strjoin('\n', ret)

    def __repr__(self):
        """Return the official string representation of the object."""
        return '<%s instance at %x>' % (self.__class__.__name__, id(self))

    def metadata_as_entry(self):
        """Return the metadata as an entry (the empty-msgid header entry)."""
        e = POEntry(msgid='')
        mdata = self.ordered_metadata()
        if mdata:
            strs = []
            for name, value in mdata:
                # Strip whitespace off each line in a multi-line entry
                value = _strjoin('\n', [_strstrip(v)
                                        for v in _strsplit(value, '\n')])
                _listappend(strs, '%s: %s' % (name, value))
            e.msgstr = _strjoin('\n', strs) + '\n'
        return e

    def save(self, fpath=None, repr_method='__str__'):
        """
        Save the po file to file *fpath* if no file handle exists for
        the object. If there's already an open file and no fpath is
        provided, then the existing file is rewritten with the modified
        data.
        **Keyword arguments**:
        - *fpath*: string, full or relative path to the file.
        - *repr_method*: string, the method to use for output.

        Raises IOError when neither self.fpath nor *fpath* is given.
        """
        if self.fpath is None and fpath is None:
            raise IOError('You must provide a file path to save() method')
        contents = getattr(self, repr_method)()
        if fpath is None:
            fpath = self.fpath
        mode = 'w'
        # binary output (mo files via to_binary) needs a binary-mode handle
        if repr_method == 'to_binary':
            mode += 'b'
        fhandle = open(fpath, mode)
        fhandle.write(contents)
        fhandle.close()

    def find(self, st, by='msgid'):
        """
        Find entry which msgid (or property identified by the *by*
        attribute) matches the string *st*. Returns the first match,
        or None when no entry matches.
        **Keyword arguments**:
        - *st*: string, the string to search for
        - *by*: string, the comparison attribute
        **Examples**:
        >>> po = pofile('tests/test_utf8.po')
        >>> entry = po.find('Thursday')
        >>> entry.msgstr
        'Jueves'
        >>> entry = po.find('Some unexistant msgid')
        >>> entry is None
        True
        >>> entry = po.find('Jueves', 'msgstr')
        >>> entry.msgid
        'Thursday'
        """
        try:
            return [e for e in self if getattr(e, by) == st][0]
        except IndexError:
            return None

    def ordered_metadata(self):
        """
        Convenience method that return the metadata ordered. The return
        value is list of tuples (metadata name, metadata_value).

        Well-known header fields come first, in the conventional po-file
        order; remaining fields follow alphabetically.
        """
        # copy the dict first
        metadata = self.metadata.copy()
        data_order = [
            'Project-Id-Version',
            'Report-Msgid-Bugs-To',
            'POT-Creation-Date',
            'PO-Revision-Date',
            'Last-Translator',
            'Language-Team',
            'MIME-Version',
            'Content-Type',
            'Content-Transfer-Encoding'
        ]
        ordered_data = []
        for data in data_order:
            try:
                value = metadata.pop(data)
                _listappend(ordered_data, (data, value))
            except KeyError:
                pass
        # the rest of the metadata won't be ordered there are no specs for this
        keys = metadata.keys()
        keys.sort()
        for data in keys:
            value = metadata[data]
            _listappend(ordered_data, (data, value))
        return ordered_data

    def to_binary(self):
        """Return the mofile binary representation.

        Layout: 7-uint32 header, then the key index table, the value
        index table, the concatenated NUL-terminated msgids and finally
        the concatenated NUL-terminated msgstrs. Only translated entries
        are emitted, sorted by msgid, preceded by the metadata entry.
        """
        import struct
        import array
        output = ''
        offsets = []
        ids = strs = ''
        entries = self.translated_entries()
        # the keys are sorted in the .mo file
        def cmp(_self, other):
            if _self.msgid > other.msgid:
                return 1
            elif _self.msgid < other.msgid:
                return -1
            else:
                return 0
        entries.sort(cmp)
        # add metadata entry
        mentry = self.metadata_as_entry()
        mentry.msgstr = _strreplace(mentry.msgstr, '\\n', '').lstrip() + '\n'
        entries = [mentry] + entries
        entries_len = len(entries)
        for e in entries:
            # For each string, we need size and file offset. Each string is
            # NUL terminated; the NUL does not count into the size.
            msgid = e._decode(e.msgid)
            msgstr = e._decode(e.msgstr)
            offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
            ids += msgid + '\0'
            strs += msgstr + '\0'
        # The header is 7 32-bit unsigned integers.
        keystart = 7*4+16*entries_len
        # and the values start after the keys
        valuestart = keystart + len(ids)
        koffsets = []
        voffsets = []
        # The string table first has the list of keys, then the list of values.
        # Each entry has first the size of the string, then the file offset.
        for o1, l1, o2, l2 in offsets:
            koffsets += [l1, o1+keystart]
            voffsets += [l2, o2+valuestart]
        offsets = koffsets + voffsets
        output = struct.pack("Iiiiiii",
                             0x950412de,        # Magic number
                             0,                 # Version
                             entries_len,       # # of entries
                             7*4,               # start of key index
                             7*4+entries_len*8, # start of value index
                             0, 0)              # size and offset of hash table
        output += array.array("i", offsets).tostring()
        output += ids
        output += strs
        return output
    # }}}
class POFile(_BaseFile):
    '''
    Po (or Pot) file reader/writer.
    POFile objects inherit the list objects methods.
    **Example**:
    >>> po = POFile()
    >>> entry1 = POEntry(
    ...     msgid="Some english text",
    ...     msgstr="Un texte en anglais"
    ... )
    >>> entry1.occurrences = [('testfile', 12),('another_file', 1)]
    >>> entry1.comment = "Some useful comment"
    >>> entry2 = POEntry(
    ...     msgid="I need my dirty cheese",
    ...     msgstr="Je veux mon sale fromage"
    ... )
    >>> entry2.occurrences = [('testfile', 15),('another_file', 5)]
    >>> entry2.comment = "Another useful comment"
    >>> entry3 = POEntry(
    ...     msgid='Some entry with quotes " \\"',
    ...     msgstr=u'Un message unicode avec des quotes " \\"'
    ... )
    >>> entry3.comment = "Test string quoting"
    >>> po.append(entry1)
    >>> po.append(entry2)
    >>> po.append(entry3)
    >>> po.header = "Some Header"
    >>> print po
    # Some Header
    msgid ""
    msgstr ""
    <BLANKLINE>
    #. Some useful comment
    #: testfile:12 another_file:1
    msgid "Some english text"
    msgstr "Un texte en anglais"
    <BLANKLINE>
    #. Another useful comment
    #: testfile:15 another_file:5
    msgid "I need my dirty cheese"
    msgstr "Je veux mon sale fromage"
    <BLANKLINE>
    #. Test string quoting
    msgid "Some entry with quotes \\" \\""
    msgstr "Un message unicode avec des quotes \\" \\""
    <BLANKLINE>
    '''
    # class POFile {{{

    def __str__(self):
        """Return the string representation of the po file"""
        ret, headers = '', _strsplit(self.header, '\n')
        for header in headers:
            # header lines beginning with ',' or ':' are written without a
            # space after the '#'
            if header[:1] in [',', ':']:
                ret += '#%s\n' % header
            else:
                ret += '# %s\n' % header
        return ret + _BaseFile.__str__(self)

    def save_as_mofile(self, fpath):
        """
        Save the binary representation of the file to *fpath*.
        **Keyword arguments**:
        - *fpath*: string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath, 'to_binary')

    def percent_translated(self):
        """
        Convenience method that return the percentage of translated
        messages. An empty file counts as fully translated (100).
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> po.percent_translated()
        50
        >>> po = POFile()
        >>> po.percent_translated()
        100
        """
        total = len([e for e in self if not e.obsolete])
        if total == 0:
            return 100
        translated = len(self.translated_entries())
        return int((100.00 / float(total)) * translated)

    def translated_entries(self):
        """
        Convenience method that return a list of translated entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.translated_entries())
        6
        """
        return [e for e in self if e.translated() and not e.obsolete]

    def untranslated_entries(self):
        """
        Convenience method that return a list of untranslated entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.untranslated_entries())
        6
        """
        return [e for e in self if not e.translated() and not e.obsolete]

    def fuzzy_entries(self):
        """
        Convenience method that return the list of 'fuzzy' entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.fuzzy_entries())
        2
        """
        return [e for e in self if 'fuzzy' in e.flags]

    def obsolete_entries(self):
        """
        Convenience method that return the list of obsolete entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.obsolete_entries())
        4
        """
        return [e for e in self if e.obsolete]

    def merge(self, refpot):
        """
        XXX this could not work if encodings are different, needs thinking
        and general refactoring of how polib handles encoding...
        Convenience method that merge the current pofile with the pot file
        provided. It behaves exactly as the gettext msgmerge utility:
        - comments of this file will be preserved, but extracted comments
          and occurrences will be discarded
        - any translations or comments in the file will be discarded,
          however dot comments and file positions will be preserved
        **Keyword argument**:
        - *refpot*: object POFile, the reference catalog.
        **Example**:
        >>> import polib
        >>> refpot = polib.pofile('tests/test_merge.pot')
        >>> po = polib.pofile('tests/test_merge_before.po')
        >>> po.merge(refpot)
        >>> expected_po = polib.pofile('tests/test_merge_after.po')
        >>> str(po) == str(expected_po)
        True
        """
        for entry in refpot:
            e = self.find(entry.msgid)
            if e is None:
                # entry is not in the po file, we must add it
                # entry is created with msgid, occurrences and comment
                self.append(POEntry(
                    msgid=entry.msgid,
                    occurrences=entry.occurrences,
                    comment=entry.comment
                ))
            else:
                # entry found, we update it...
                e.occurrences = entry.occurrences
                e.comment = entry.comment
        # ok, now we must "obsolete" entries that are not in the refpot
        # anymore
        for entry in self:
            if refpot.find(entry.msgid) is None:
                entry.obsolete = True
    # }}}
class MOFile(_BaseFile):
    '''
    Mo file reader/writer.
    MOFile objects inherit the list objects methods.
    **Example**:
    >>> mo = MOFile()
    >>> entry1 = POEntry(
    ...     msgid="Some english text",
    ...     msgstr="Un texte en anglais"
    ... )
    >>> entry2 = POEntry(
    ...     msgid="I need my dirty cheese",
    ...     msgstr="Je veux mon sale fromage"
    ... )
    >>> entry3 = MOEntry(
    ...     msgid='Some entry with quotes " \\"',
    ...     msgstr=u'Un message unicode avec des quotes " \\"'
    ... )
    >>> mo.append(entry1)
    >>> mo.append(entry2)
    >>> mo.append(entry3)
    >>> print mo
    msgid ""
    msgstr ""
    <BLANKLINE>
    msgid "Some english text"
    msgstr "Un texte en anglais"
    <BLANKLINE>
    msgid "I need my dirty cheese"
    msgstr "Je veux mon sale fromage"
    <BLANKLINE>
    msgid "Some entry with quotes \\" \\""
    msgstr "Un message unicode avec des quotes \\" \\""
    <BLANKLINE>
    '''
    # class MOFile {{{

    def __init__(self, fpath=None, wrapwidth=78):
        """
        MOFile constructor.
        See _BaseFile.__init__ for the arguments.
        """
        _BaseFile.__init__(self, fpath, wrapwidth)
        # magic number read from the parsed mo file (None until parsed)
        self.magic_number = None
        self.version = 0

    def save_as_pofile(self, fpath):
        """
        Save the string representation of the file to *fpath*.
        **Keyword argument**:
        - *fpath*: string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath)

    def save(self, fpath):
        """
        Save the binary representation of the file to *fpath*.
        Note: unlike _BaseFile.save, *fpath* is required here.
        **Keyword argument**:
        - *fpath*: string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath, 'to_binary')

    def percent_translated(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return 100

    def translated_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return self

    def untranslated_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return []

    def fuzzy_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return []

    def obsolete_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return []
    # }}}
class _BaseEntry(object):
    """
    Base class for POEntry or MOEntry objects.
    This class must *not* be instantiated directly.
    """
    # class _BaseEntry {{{

    def __init__(self, *args, **kwargs):
        """Base Entry constructor.

        Recognized keyword arguments: msgid, msgstr, msgid_plural,
        msgstr_plural (dict keyed by plural index), obsolete (bool) and
        encoding (defaults to the module-level default_encoding).
        """
        self.msgid = _dictget(kwargs, 'msgid', '')
        self.msgstr = _dictget(kwargs, 'msgstr', '')
        self.msgid_plural = _dictget(kwargs, 'msgid_plural', '')
        self.msgstr_plural = _dictget(kwargs, 'msgstr_plural', {})
        self.obsolete = _dictget(kwargs, 'obsolete', False)
        self.encoding = _dictget(kwargs, 'encoding', default_encoding)

    def __repr__(self):
        """Return the official string representation of the object."""
        return '<%s instance at %x>' % (self.__class__.__name__, id(self))

    def __str__(self, wrapwidth=78):
        """
        Common string representation of the POEntry and MOEntry
        objects.
        """
        # obsolete entries are prefixed with '#~ ' on every line
        if self.obsolete:
            delflag = '#~ '
        else:
            delflag = ''
        # write the msgid
        ret = []
        ret += self._str_field("msgid", delflag, "", self.msgid)
        # write the msgid_plural if any
        if self.msgid_plural:
            ret += self._str_field("msgid_plural", delflag, "", self.msgid_plural)
        if self.msgstr_plural:
            # write the msgstr_plural if any, in plural-index order
            msgstrs = self.msgstr_plural
            keys = msgstrs.keys()
            keys.sort()
            for index in keys:
                msgstr = msgstrs[index]
                plural_index = '[%s]' % index
                ret += self._str_field("msgstr", delflag, plural_index, msgstr)
        else:
            # otherwise write the msgstr
            ret += self._str_field("msgstr", delflag, "", self.msgstr)
        _listappend(ret, '')
        return _strjoin('\n', ret)

    def _str_field(self, fieldname, delflag, plural_index, field):
        # Render one msgid/msgstr field as a list of quoted po-file lines;
        # multi-line values start with an empty first string.
        field = self._decode(field)
        lines = field.splitlines(True) # keep line breaks in strings
        # potentially, we could do line-wrapping here, but textwrap.wrap
        # treats whitespace too carelessly for us to use it.
        if len(lines) > 1:
            lines = ['']+lines # start with initial empty line
        else:
            lines = [field] # needed for the empty string case
        ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
                                quote(_listpop(lines, 0)))]
        for mstr in lines:
            _listappend(ret, '%s"%s"' % (delflag, quote(mstr)))
        return ret

    def _decode(self, st):
        # NOTE(review): despite the name, this *encodes* unicode objects to
        # a byte string using the entry's encoding (Python 2 semantics);
        # plain byte strings pass through unchanged.
        if isinstance(st, unicode):
            return st.encode(self.encoding)
        return st
    # }}}
class POEntry(_BaseEntry):
    """
    Represents a po file entry.
    **Examples**:
    >>> entry = POEntry(msgid='Welcome', msgstr='Bienvenue')
    >>> entry.occurrences = [('welcome.py', 12), ('anotherfile.py', 34)]
    >>> print entry
    #: welcome.py:12 anotherfile.py:34
    msgid "Welcome"
    msgstr "Bienvenue"
    <BLANKLINE>
    >>> entry = POEntry()
    >>> entry.occurrences = [('src/spam.c', 32), ('src/eggs.c', 45)]
    >>> entry.tcomment = 'A plural translation'
    >>> entry.flags.append('c-format')
    >>> entry.msgid = 'I have spam but no egg !'
    >>> entry.msgid_plural = 'I have spam and %d eggs !'
    >>> entry.msgstr_plural[0] = "J'ai du jambon mais aucun oeuf !"
    >>> entry.msgstr_plural[1] = "J'ai du jambon et %d oeufs !"
    >>> print entry
    # A plural translation
    #: src/spam.c:32 src/eggs.c:45
    #, c-format
    msgid "I have spam but no egg !"
    msgid_plural "I have spam and %d eggs !"
    msgstr[0] "J'ai du jambon mais aucun oeuf !"
    msgstr[1] "J'ai du jambon et %d oeufs !"
    <BLANKLINE>
    """
    # class POEntry {{{

    def __init__(self, *args, **kwargs):
        """POEntry constructor.

        Adds to _BaseEntry: comment (extracted '#.'), tcomment
        (translator '# '), occurrences (list of (file, line) tuples,
        '#:') and flags (list of strings, '#,').
        """
        _BaseEntry.__init__(self, *args, **kwargs)
        self.comment = _dictget(kwargs, 'comment', '')
        self.tcomment = _dictget(kwargs, 'tcomment', '')
        self.occurrences = _dictget(kwargs, 'occurrences', [])
        self.flags = _dictget(kwargs, 'flags', [])

    def __str__(self, wrapwidth=78):
        """
        Return the string representation of the entry.

        Order: extracted comments, translator comments, occurrences,
        flags, then the msgid/msgstr fields from _BaseEntry.
        """
        if self.obsolete:
            return _BaseEntry.__str__(self)
        ret = []
        # comment first, if any (with text wrapping as xgettext does)
        if self.comment != '':
            comments = _strsplit(self._decode(self.comment), '\n')
            for comment in comments:
                if wrapwidth > 0 and len(comment) > wrapwidth-3:
                    lines = _textwrap(comment, wrapwidth,
                                      initial_indent='#. ',
                                      subsequent_indent='#. ',
                                      break_long_words=False)
                    _listappend(ret, lines)
                else:
                    _listappend(ret, '#. %s' % comment)
        # translator comment, if any (with text wrapping as xgettext does)
        if self.tcomment != '':
            tcomments = _strsplit(self._decode(self.tcomment), '\n')
            for tcomment in tcomments:
                if wrapwidth > 0 and len(tcomment) > wrapwidth-2:
                    lines = _textwrap(tcomment, wrapwidth,
                                      initial_indent='# ',
                                      subsequent_indent='# ',
                                      break_long_words=False)
                    _listappend(ret, lines)
                else:
                    _listappend(ret, '# %s' % tcomment)
        # occurrences (with text wrapping as xgettext does)
        if self.occurrences:
            filelist = []
            for fpath, lineno in self.occurrences:
                _listappend(filelist, '%s:%s' % (self._decode(fpath), lineno))
            filestr = _strjoin(' ', filelist)
            if wrapwidth > 0 and len(filestr)+3 > wrapwidth:
                # XXX textwrap split words that contain hyphen, this is not
                # what we want for filenames, so the dirty hack is to
                # temporally replace hyphens with a char that a file cannot
                # contain, like "*"
                lines = _textwrap(_strreplace(filestr, '-', '*'),
                                  wrapwidth,
                                  initial_indent='#: ',
                                  subsequent_indent='#: ',
                                  break_long_words=False)
                # end of the replace hack
                for line in lines:
                    _listappend(ret, _strreplace(line, '*', '-'))
            else:
                _listappend(ret, '#: '+filestr)
        # flags
        if self.flags:
            flags = []
            for flag in self.flags:
                _listappend(flags, flag)
            _listappend(ret, '#, %s' % _strjoin(', ', flags))
        _listappend(ret, _BaseEntry.__str__(self))
        return _strjoin('\n', ret)

    def __cmp__(self, other):
        '''
        Called by comparison operations if rich comparison is not defined.
        **Tests**:
        >>> a = POEntry(msgid='a', occurrences=[('b.py', 1), ('b.py', 3)])
        >>> b = POEntry(msgid='b', occurrences=[('b.py', 1), ('b.py', 3)])
        >>> c1 = POEntry(msgid='c1', occurrences=[('a.py', 1), ('b.py', 1)])
        >>> c2 = POEntry(msgid='c2', occurrences=[('a.py', 1), ('a.py', 3)])
        >>> po = POFile()
        >>> po.append(a)
        >>> po.append(b)
        >>> po.append(c1)
        >>> po.append(c2)
        >>> po.sort()
        >>> print po
        #
        msgid ""
        msgstr ""
        <BLANKLINE>
        #: a.py:1 a.py:3
        msgid "c2"
        msgstr ""
        <BLANKLINE>
        #: a.py:1 b.py:1
        msgid "c1"
        msgstr ""
        <BLANKLINE>
        #: b.py:1 b.py:3
        msgid "a"
        msgstr ""
        <BLANKLINE>
        #: b.py:1 b.py:3
        msgid "b"
        msgstr ""
        <BLANKLINE>
        '''
        def compare_occurrences(a, b):
            """
            Compare an entry occurrence with another one.
            """
            # NOTE(review): this returns True/False (i.e. 1/0), never -1,
            # although a cmp-style comparator is expected to return
            # -1/0/1. Left as-is because the doctest above pins the
            # resulting sort order — confirm before changing.
            if a[0] != b[0]:
                return a[0] < b[0]
            if a[1] != b[1]:
                return a[1] < b[1]
            return 0
        # First: Obsolete test
        if self.obsolete != other.obsolete:
            if self.obsolete:
                return -1
            else:
                return 1
        # Work on a copy to protect original
        occ1 = self.occurrences[:]
        occ2 = other.occurrences[:]
        # Sorting using compare method
        occ1.sort(compare_occurrences)
        occ2.sort(compare_occurrences)
        # Comparing sorted occurrences
        pos = 0
        for entry1 in occ1:
            try:
                entry2 = occ2[pos]
            except IndexError:
                # other has fewer occurrences: self sorts after it
                return 1
            pos = pos + 1
            if entry1[0] != entry2[0]:
                if entry1[0] > entry2[0]:
                    return 1
                else:
                    return -1
            if entry1[1] != entry2[1]:
                if entry1[1] > entry2[1]:
                    return 1
                else:
                    return -1
        # Finally: Compare message ID
        if self.msgid > other.msgid: return 1
        else: return -1

    def translated(self):
        """Return True if the entry has been translated or False"""
        # fuzzy and obsolete entries never count as translated
        if self.obsolete or 'fuzzy' in self.flags:
            return False
        if self.msgstr != '':
            return True
        if self.msgstr_plural:
            # every plural form must be filled in
            for pos in self.msgstr_plural:
                if self.msgstr_plural[pos] == '':
                    return False
            return True
        return False
    # }}}
class MOEntry(_BaseEntry):
    """
    Represents a mo file entry.
    **Examples**:
    >>> entry = MOEntry()
    >>> entry.msgid = 'translate me !'
    >>> entry.msgstr = 'traduisez moi !'
    >>> print entry
    msgid "translate me !"
    msgstr "traduisez moi !"
    <BLANKLINE>
    """
    # class MOEntry {{{

    def __str__(self, wrapwidth=78):
        """
        Return the string representation of the entry.

        Simply delegates to _BaseEntry.__str__: mo entries carry no
        comments, occurrences or flags.
        """
        return _BaseEntry.__str__(self, wrapwidth)
    # }}}
class _POFileParser(object):
    """
    A finite state machine to parse efficiently and correctly po
    file format.

    Each input line is classified into a two-letter symbol (see
    __init__); (symbol, current_state) pairs are looked up in a
    transition table that maps to a handler method and a next state.
    """
    # class _POFileParser {{{
    def __init__(self, fpath):
        """
        Constructor.
        **Keyword argument**:
        - *fpath*: string, path to the po file
        """
        self.fhandle = open(fpath, 'r')
        # The POFile being populated; completed entries are appended to
        # it by the handlers when the start of the next entry is seen.
        self.instance = POFile(fpath=fpath)
        # Transition table: (symbol, state) -> (handler, next_state).
        self.transitions = {}
        self.current_entry = POEntry()
        self.current_state = 'ST'
        self.current_token = None  # the line currently being processed
        # two memo flags used in handlers
        self.msgstr_index = 0
        self.entry_obsolete = 0
        # Configure the state machine, by adding transitions.
        # Signification of symbols:
        # * ST: Beginning of the file (start)
        # * HE: Header
        # * TC: a translation comment
        # * GC: a generated comment
        # * OC: a file/line occurrence
        # * FL: a flags line
        # * MI: a msgid
        # * MP: a msgid plural
        # * MS: a msgstr
        # * MX: a msgstr plural
        # * MC: a msgid or msgstr continuation line
        all_ = ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI']
        self.add('TC', ['ST', 'HE'], 'HE')
        self.add('TC', ['GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI'], 'TC')
        self.add('GC', all_, 'GC')
        self.add('OC', all_, 'OC')
        self.add('FL', all_, 'FL')
        self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MX'], 'MI')
        self.add('MP', ['TC', 'GC', 'MI'], 'MP')
        self.add('MS', ['MI', 'MP', 'TC'], 'MS')
        self.add('MX', ['MI', 'MX', 'MP', 'TC'], 'MX')
        self.add('MC', ['MI', 'MP', 'MS', 'MX'], 'MC')
    def parse(self):
        """
        Run the state machine, parse the file line by line and call process()
        with the current matched symbol.

        Returns the populated POFile instance and closes the file handle.
        """
        # NOTE(review): lastlen is assigned here but never used below.
        i, lastlen = 1, 0
        for line in self.fhandle:
            line = _strstrip(line)
            if line == '':
                i = i+1
                continue
            # Obsolete entries are prefixed with '#~ '; strip the marker,
            # remember the flag, and parse the remainder normally.
            if line[:3] == '#~ ':
                line = line[3:]
                self.entry_obsolete = 1
            else:
                self.entry_obsolete = 0
            self.current_token = line
            # Dispatch on the line prefix.  Lines matching none of the
            # prefixes below are silently skipped.
            if line[:2] == '#:':
                # we are on a occurrences line
                self.process('OC', i)
            elif line[:7] == 'msgid "':
                # we are on a msgid
                self.process('MI', i)
            elif line[:8] == 'msgstr "':
                # we are on a msgstr
                self.process('MS', i)
            elif line[:1] == '"':
                # we are on a continuation line or some metadata
                self.process('MC', i)
            elif line[:14] == 'msgid_plural "':
                # we are on a msgid plural
                self.process('MP', i)
            elif line[:7] == 'msgstr[':
                # we are on a msgstr plural
                self.process('MX', i)
            elif line[:3] == '#, ':
                # we are on a flags line
                self.process('FL', i)
            elif line[:2] == '# ' or line == '#':
                # Normalize a lone '#' so handle_tc's [2:] slice works.
                if line == '#': line = line + ' '
                # we are on a translator comment line
                self.process('TC', i)
            elif line[:2] == '#.':
                # we are on a generated comment line
                self.process('GC', i)
            i = i+1
        if self.current_entry:
            # since entries are added when another entry is found, we must add
            # the last entry here (only if there are lines)
            _listappend(self.instance, self.current_entry)
        # before returning the instance, check if there's metadata and if
        # so extract it in a dict
        # NOTE(review): raises IndexError if the file contained no entries
        # at all (self.instance is empty here).
        firstentry = self.instance[0]
        if firstentry.msgid == '': # metadata found
            # remove the entry
            firstentry = _listpop(self.instance, 0)
            self.instance.metadata_is_fuzzy = firstentry.flags
            key = None
            for msg in firstentry.msgstr.splitlines():
                try:
                    key, val = _strsplit(msg, ':', 1)
                    self.instance.metadata[key] = _strstrip(val)
                except:
                    # Not a 'key: value' line: fold it into the previous
                    # metadata value, if there was one.
                    if key is not None:
                        self.instance.metadata[key] += '\n'+_strstrip(msg)
        # close opened file
        self.fhandle.close()
        return self.instance
    def add(self, symbol, states, next_state):
        """
        Add a transition to the state machine.
        Keywords arguments:
        symbol     -- string, the matched token (two chars symbol)
        states     -- list, a list of states (two chars symbols)
        next_state -- the next state the fsm will have after the action
        """
        for state in states:
            # The handler is resolved by naming convention from the
            # destination state, e.g. 'MI' -> self.handle_mi.
            action = getattr(self, 'handle_%s' % next_state.lower())
            self.transitions[(symbol, state)] = (action, next_state)
    def process(self, symbol, linenum):
        """
        Process the transition corresponding to the current state and the
        symbol provided.
        Keywords arguments:
        symbol  -- string, the matched token (two chars symbol)
        linenum -- integer, the current line number of the parsed file
        """
        try:
            (action, state) = self.transitions[(symbol, self.current_state)]
            # The handler returns a true value when the state change
            # should be committed (handle_mc returns False to stay put).
            if action():
                self.current_state = state
        # Python 2 'except ..., e' syntax: this module is Python 2 only.
        # Any failure (unknown transition -> KeyError, handler error) is
        # reported as a syntax error with the offending line number.
        except Exception, e:
            raise IOError('Syntax error in po file (line %s): %s' % \
                (linenum, e))
    # state handlers
    def handle_he(self):
        """Handle a header comment line; appended to instance.header."""
        if self.instance.header != '':
            self.instance.header += '\n'
        self.instance.header += self.current_token[2:]
        return 1
    def handle_tc(self):
        """Handle a translator comment."""
        # A comment following a msgstr starts a new entry: flush the
        # completed one first.  (Same pattern in the handlers below.)
        if self.current_state in ['MC', 'MS', 'MX']:
            _listappend(self.instance, self.current_entry)
            self.current_entry = POEntry()
        if self.current_entry.tcomment != '':
            self.current_entry.tcomment += '\n'
        self.current_entry.tcomment += self.current_token[2:]
        return True
    def handle_gc(self):
        """Handle a generated comment."""
        if self.current_state in ['MC', 'MS', 'MX']:
            _listappend(self.instance, self.current_entry)
            self.current_entry = POEntry()
        if self.current_entry.comment != '':
            self.current_entry.comment += '\n'
        self.current_entry.comment += self.current_token[3:]
        return True
    def handle_oc(self):
        """Handle a file:num occurrence."""
        if self.current_state in ['MC', 'MS', 'MX']:
            _listappend(self.instance, self.current_entry)
            self.current_entry = POEntry()
        occurrences = _strsplit(self.current_token[3:])
        for occurrence in occurrences:
            if occurrence != '':
                # Each occurrence is a whitespace-separated 'file:line'.
                fil, line = _strsplit(occurrence, ':')
                _listappend(self.current_entry.occurrences, (fil, line))
        return True
    def handle_fl(self):
        """Handle a flags line ('#, flag1, flag2, ...')."""
        if self.current_state in ['MC', 'MS', 'MX']:
            _listappend(self.instance, self.current_entry)
            self.current_entry = POEntry()
        self.current_entry.flags += _strsplit(self.current_token[3:], ', ')
        return True
    def handle_mi(self):
        """Handle a msgid."""
        if self.current_state in ['MC', 'MS', 'MX']:
            _listappend(self.instance, self.current_entry)
            self.current_entry = POEntry()
        self.current_entry.obsolete = self.entry_obsolete
        # Strip the 'msgid "' prefix and the trailing quote.
        self.current_entry.msgid = unquote(self.current_token[7:-1])
        return True
    def handle_mp(self):
        """Handle a msgid plural."""
        self.current_entry.msgid_plural = unquote(self.current_token[14:-1])
        return True
    def handle_ms(self):
        """Handle a msgstr."""
        self.current_entry.msgstr = unquote(self.current_token[8:-1])
        return True
    def handle_mx(self):
        """Handle a msgstr plural."""
        # NOTE(review): token[7] grabs a single character and [11:-1]
        # assumes the exact shape 'msgstr[N] "..."' with a one-digit
        # index N, so plural indexes >= 10 would be parsed incorrectly.
        index, value = self.current_token[7], self.current_token[11:-1]
        self.current_entry.msgstr_plural[index] = unquote(value)
        self.msgstr_index = index
        return True
    def handle_mc(self):
        """Handle a msgid or msgstr continuation line."""
        # Append the unquoted continuation to whichever field the
        # current state was filling.
        if self.current_state == 'MI':
            self.current_entry.msgid += unquote(self.current_token[1:-1])
        elif self.current_state == 'MP':
            self.current_entry.msgid_plural += \
                unquote(self.current_token[1:-1])
        elif self.current_state == 'MS':
            self.current_entry.msgstr += unquote(self.current_token[1:-1])
        elif self.current_state == 'MX':
            msgstr = self.current_entry.msgstr_plural[self.msgstr_index] +\
                unquote(self.current_token[1:-1])
            self.current_entry.msgstr_plural[self.msgstr_index] = msgstr
        # don't change the current state
        return False
    # }}}
class _MOFileParser(object):
    """
    A class to parse binary mo files.
    """
    # class _MOFileParser {{{
    def __init__(self, fpath):
        """_MOFileParser constructor.

        *fpath*: string, path to the mo file (opened in binary mode).
        """
        self.fhandle = open(fpath, 'rb')
        self.instance = MOFile(fpath)
    def parse_magicnumber(self):
        """
        Parse the magic number and raise an exception if not valid.
        """
        magic_number = self._readbinary(fmt='4s')
        # magic number must be 0xde120495 or 0x950412de
        # (both byte orders are accepted by this check).
        if magic_number not in ['\xde\x12\x04\x95', '\x95\x04\x12\xde']:
            raise IOError('Invalid mo file, magic number is incorrect !')
        self.instance.magic_number = magic_number
    def parse(self):
        """
        Build the instance with the file handle provided in the
        constructor.

        Reads the mo header, then the msgid/msgstr index tables, then
        the strings themselves; returns the populated MOFile.
        """
        self.parse_magicnumber()
        # parse the version number
        # NOTE(review): 'L' unpacks in the platform's native byte order,
        # so a file with the opposite endianness passes the magic-number
        # check but would be misread here -- confirm intended behavior.
        self.instance.version = self._readbinary('L')
        # parse the number of strings
        numofstrings = self._readbinary('L')
        # original strings hash table offset
        msgids_hash_offset = self._readbinary('L')
        # translation strings hash table offset
        # NOTE(review): 'P' (native pointer size) differs from the 'L'
        # used for every other header field; looks unintentional.
        msgstrs_hash_offset = self._readbinary('P')
        # move to msgid hash table and read length and offset of msgids
        self.fhandle.seek(msgids_hash_offset)
        msgids_index = []
        for i in range(numofstrings):
            _listappend(msgids_index, self._readbinary('LL'))
        # move to msgstr hash table and read length and offset of msgstrs
        self.fhandle.seek(msgstrs_hash_offset)
        msgstrs_index = []
        for i in range(numofstrings):
            _listappend(msgstrs_index, self._readbinary('LL'))
        # build entries
        for i in range(numofstrings):
            # Each index entry is a (length, offset) pair.
            self.fhandle.seek(msgids_index[i][1])
            msgid = self.fhandle.read(msgids_index[i][0])
            self.fhandle.seek(msgstrs_index[i][1])
            msgstr = self.fhandle.read(msgstrs_index[i][0])
            if i == 0: # metadata
                # The first entry is treated as the catalog metadata:
                # its msgstr holds 'key: value' lines.
                raw_metadata, metadata = _strsplit(msgstr, '\n'), {}
                for line in raw_metadata:
                    tokens = _strsplit(line, ':', 1)
                    if tokens[0] != '':
                        try:
                            metadata[tokens[0]] = _strstrip(tokens[1])
                        except IndexError:
                            # Line had a key but no ':'-separated value.
                            metadata[tokens[0]] = ''
                self.instance.metadata = metadata
                continue
            entry = MOEntry(msgid=msgid, msgstr=msgstr)
            _listappend(self.instance, entry)
        # close opened file
        self.fhandle.close()
        return self.instance
    def _readbinary(self, fmt='c'):
        """
        Private method that unpack n bytes of data using format <fmt>.
        It returns a tuple or a mixed value if the tuple length is 1.
        """
        numbytes = struct.calcsize(fmt)
        bytes = self.fhandle.read(numbytes)
        tup = struct.unpack(fmt, bytes)
        if len(tup) == 1:
            return tup[0]
        return tup
    # }}}
if __name__ == '__main__':
    """
    **Main function**::
      - to **test** the module just run: *python polib.py [-v]*
      - to **profile** the module: *python polib.py -p <some_pofile.po>*
    """
    # main function {{{
    import sys
    if len(sys.argv) > 2 and sys.argv[1] == '-p':
        # Profiling mode: parse the given catalog (po or mo, chosen by
        # filename suffix) and stringify it under the 'profile' module.
        def test(f):
            if f.endswith('po'):
                p = pofile(f)
            else:
                p = mofile(f)
            s = str(p)
        import profile
        profile.run('test("'+sys.argv[2]+'")')
    else:
        # Default mode: run the module's doctests.  doctest.testmod()
        # itself honors a '-v' flag on the command line.
        import doctest
        doctest.testmod()
    # }}}
# ---- (file-concatenation artifact: non-Python separator neutralized) ----
from sqlalchemy import (
Column, DateTime, ForeignKey, Integer,
String, SmallInteger, UniqueConstraint, event,
BigInteger)
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mysql import JSON
from rdr_service.model.base import Base, model_insert_listener, model_update_listener
from rdr_service.model.utils import Enum, MultiEnum, UTCDateTime, UTCDateTime6
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag, GenomicJob, \
GenomicWorkflowState, GenomicSubProcessStatus, GenomicSubProcessResult, GenomicManifestTypes, \
GenomicContaminationCategory, GenomicQcStatus, GenomicIncidentCode, GenomicIncidentStatus, GenomicReportState
class GenomicSet(Base):
    """
    Genomic Set model

    A named, versioned batch of participants selected for genomics
    processing; members live in GenomicSetMember.
    """
    __tablename__ = "genomic_set"
    # Child members; delete-orphan cascade removes members with the set.
    genomicSetMember = relationship("GenomicSetMember", cascade="all, delete-orphan")
    # Primary Key
    id = Column("id", Integer, primary_key=True, autoincrement=True, nullable=False)
    # have mysql set the creation data for each new order
    created = Column("created", DateTime, nullable=True)
    # have mysql always update the modified data when the record is changed
    modified = Column("modified", DateTime, nullable=True)
    genomicSetName = Column("genomic_set_name", String(80), nullable=False)
    genomicSetCriteria = Column("genomic_set_criteria", String(80), nullable=False)
    genomicSetVersion = Column("genomic_set_version", Integer, nullable=False)
    # genomic set file
    genomicSetFile = Column("genomic_set_file", String(250), nullable=True)
    # genomic set file timestamp
    genomicSetFileTime = Column("genomic_set_file_time", DateTime, nullable=True)
    genomicSetStatus = Column("genomic_set_status", Enum(GenomicSetStatus), default=GenomicSetStatus.UNSET)
    validatedTime = Column("validated_time", DateTime, nullable=True)
    # A set is uniquely identified by its (name, version) pair.
    __table_args__ = (UniqueConstraint("genomic_set_name", "genomic_set_version", name="uidx_genomic_name_version"),)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicSet, "before_insert", model_insert_listener)
event.listen(GenomicSet, "before_update", model_update_listener)
class GenomicSetMember(Base):
    """
    Genomic Set Member model

    One participant/sample within a GenomicSet, carrying manifest
    ingestion fields and job-run bookkeeping for the genomics pipeline.
    """
    __tablename__ = "genomic_set_member"
    # Primary Key
    id = Column("id", Integer, primary_key=True, autoincrement=True, nullable=False)
    # have mysql set the creation data for each new order
    created = Column("created", DateTime, nullable=True)
    # have mysql always update the modified data when the record is changed
    modified = Column("modified", DateTime, nullable=True)
    # Parent set (genomic_set.id).
    genomicSetId = Column("genomic_set_id", Integer, ForeignKey("genomic_set.id"), nullable=False)
    participantId = Column("participant_id", Integer, nullable=True)
    nyFlag = Column("ny_flag", Integer, nullable=True)
    sexAtBirth = Column("sex_at_birth", String(20), nullable=True)
    genomeType = Column("genome_type", String(80), nullable=True)
    ai_an = Column('ai_an', String(2), nullable=True)
    """Flag for if participant is American Indian/Alaska Native"""
    biobankId = Column("biobank_id", String(128), nullable=True, index=True)
    packageId = Column("package_id", String(250), nullable=True)
    validationStatus = Column("validation_status", Enum(GenomicSetMemberStatus), default=GenomicSetMemberStatus.UNSET)
    validationFlags = Column("validation_flags", MultiEnum(GenomicValidationFlag), nullable=True)
    validatedTime = Column("validated_time", DateTime, nullable=True)
    # collectionTubeId corresponds to biobank_stored_sample_id
    collectionTubeId = Column('collection_tube_id', String(80), nullable=True, index=True)
    # sampleId is the great-grandchild aliquot of collectionTubeID
    sampleId = Column('sample_id', String(80), nullable=True, index=True)
    sampleType = Column('sample_type', String(50), nullable=True)
    sequencingFileName = Column('sequencing_file_name', String(255), nullable=True)
    """Name of the csv file being used for genomics sequencing"""
    gcSiteId = Column('gc_site_id', String(11), nullable=True)
    # BBGC Manifest Columns; ingested from GC manifest
    gcManifestBoxStorageUnitId = Column('gc_manifest_box_storage_unit_id', String(255), nullable=True)
    gcManifestBoxPlateId = Column('gc_manifest_box_plate_id', String(255), nullable=True)
    gcManifestWellPosition = Column('gc_manifest_well_position', String(10), nullable=True)
    gcManifestParentSampleId = Column('gc_manifest_parent_sample_id', String(20), nullable=True)
    gcManifestMatrixId = Column('gc_manifest_matrix_id', String(20), nullable=True)
    gcManifestTreatments = Column('gc_manifest_treatments', String(20), nullable=True)
    gcManifestQuantity_ul = Column('gc_manifest_quantity_ul', Integer, nullable=True)
    gcManifestTotalConcentration_ng_per_ul = Column('gc_manifest_total_concentration_ng_per_ul', Integer, nullable=True)
    gcManifestTotalDNA_ng = Column('gc_manifest_total_dna_ng', Integer, nullable=True)
    gcManifestVisitDescription = Column('gc_manifest_visit_description', String(128), nullable=True)
    gcManifestSampleSource = Column('gc_manifest_sample_source', String(20), nullable=True)
    gcManifestStudy = Column('gc_manifest_study', String(255), nullable=True)
    gcManifestTrackingNumber = Column('gc_manifest_tracking_number', String(255), nullable=True)
    gcManifestContact = Column('gc_manifest_contact', String(255), nullable=True)
    gcManifestEmail = Column('gc_manifest_email', String(255), nullable=True)
    gcManifestStudyPI = Column('gc_manifest_study_pi', String(255), nullable=True)
    gcManifestTestName = Column('gc_manifest_test_name', String(255), nullable=True)
    gcManifestFailureMode = Column('gc_manifest_failure_mode', String(128), nullable=True)
    gcManifestFailureDescription = Column('gc_manifest_failure_description', String(255), nullable=True)
    # File Processed IDs
    aw1FileProcessedId = Column('aw1_file_processed_id',
                                Integer, ForeignKey("genomic_file_processed.id"),
                                nullable=True)
    aw2FileProcessedId = Column('aw2_file_processed_id',
                                Integer, ForeignKey("genomic_file_processed.id"),
                                nullable=True)
    # Reconciliation and Manifest columns
    # Each of the *JobRunId columns below points at genomic_job_run.id,
    # recording which pipeline run performed the given step.
    # Reconciled to BB Manifest
    reconcileMetricsBBManifestJobRunId = Column('reconcile_metrics_bb_manifest_job_run_id',
                                                Integer, ForeignKey("genomic_job_run.id"),
                                                nullable=True)
    # Reconciled to GC manifest
    reconcileGCManifestJobRunId = Column('reconcile_gc_manifest_job_run_id',
                                         Integer, ForeignKey("genomic_job_run.id"),
                                         nullable=True)
    reconcileMetricsSequencingJobRunId = Column('reconcile_metrics_sequencing_job_run_id',
                                                Integer, ForeignKey("genomic_job_run.id"),
                                                nullable=True)
    reconcileCvlJobRunId = Column('reconcile_cvl_job_run_id',
                                  Integer, ForeignKey("genomic_job_run.id"),
                                  nullable=True)
    gemA1ManifestJobRunId = Column('gem_a1_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    gemA2ManifestJobRunId = Column('gem_a2_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    gemPass = Column('gem_pass', String(10), nullable=True)
    gemDateOfImport = Column("gem_date_of_import", DateTime, nullable=True)
    gemA3ManifestJobRunId = Column('gem_a3_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    aw3ManifestJobRunID = Column('aw3_manifest_job_run_id',
                                 Integer, ForeignKey("genomic_job_run.id"),
                                 nullable=True)
    aw4ManifestJobRunID = Column('aw4_manifest_job_run_id',
                                 Integer, ForeignKey("genomic_job_run.id"),
                                 nullable=True)
    aw2fManifestJobRunID = Column('aw2f_manifest_job_run_id',
                                  Integer, ForeignKey("genomic_job_run.id"),
                                  nullable=True)
    # CVL WGS Fields
    cvlW1ManifestJobRunId = Column('cvl_w1_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    cvlW2ManifestJobRunID = Column('cvl_w2_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    cvlW3ManifestJobRunID = Column('cvl_w3_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    cvlW4ManifestJobRunID = Column('cvl_w4_manifest_job_run_id',
                                   Integer, ForeignKey("genomic_job_run.id"),
                                   nullable=True)
    cvlW4FManifestJobRunID = Column('cvl_w4f_manifest_job_run_id',
                                    Integer, ForeignKey("genomic_job_run.id"),
                                    nullable=True)
    cvlAW1CManifestJobRunID = Column('cvl_aw1c_manifest_job_run_id',
                                     Integer, ForeignKey("genomic_job_run.id"),
                                     nullable=True)
    cvlAW1CFManifestJobRunID = Column('cvl_aw1cf_manifest_job_run_id',
                                      Integer, ForeignKey("genomic_job_run.id"),
                                      nullable=True)
    colorMetricsJobRunID = Column('color_metrics_job_run_id',
                                  Integer, ForeignKey("genomic_job_run.id"),
                                  nullable=True)
    aw3ManifestFileId = Column('aw3_manifest_file_id',
                               Integer, ForeignKey("genomic_manifest_file.id"),
                               nullable=True)
    aw0ManifestFileId = Column('aw0_manifest_file_id',
                               Integer, ForeignKey("genomic_manifest_file.id"),
                               nullable=True)
    gemMetricsAncestryLoopResponse = Column('gem_metrics_ancestry_loop_response',
                                            String(10), nullable=True)
    gemMetricsAvailableResults = Column('gem_metrics_available_results',
                                        String(255), nullable=True)
    gemMetricsResultsReleasedAt = Column('gem_metrics_results_released_at',
                                         DateTime, nullable=True)
    # Genomic State Fields
    genomicWorkflowState = Column('genomic_workflow_state',
                                  Enum(GenomicWorkflowState),
                                  default=GenomicWorkflowState.UNSET)
    # Denormalized string copy of the workflow state (default mirrors
    # the enum's UNSET).
    genomicWorkflowStateStr = Column('genomic_workflow_state_str', String(64), default="UNSET")
    genomicWorkflowStateModifiedTime = Column("genomic_workflow_state_modified_time", DateTime, nullable=True)
    reportConsentRemovalDate = Column('report_consent_removal_date', DateTime(timezone=True), nullable=True)
    genomicWorkflowStateHistory = Column("genomic_workflow_state_history", JSON, nullable=True)
    # Broad QC Status
    qcStatus = Column('qc_status', Enum(GenomicQcStatus), default=GenomicQcStatus.UNSET)
    qcStatusStr = Column('qc_status_str', String(64), default="UNSET")
    # Broad fingerprint file path
    fingerprintPath = Column('fingerprint_path', String(255), nullable=True)
    # Developer note
    devNote = Column('dev_note', String(255), nullable=True)
    # For tracking replates
    # Self-referential FK back to genomic_set_member.id.
    replatedMemberId = Column('replated_member_id',
                              ForeignKey('genomic_set_member.id'),
                              nullable=True)
    ignoreFlag = Column('ignore_flag', SmallInteger, nullable=False, default=0)
    blockResearch = Column('block_research', SmallInteger, nullable=False, default=0)
    blockResearchReason = Column('block_research_reason', String(255), nullable=True)
    blockResults = Column('block_results', SmallInteger, nullable=False, default=0)
    blockResultsReason = Column('block_results_reason', String(255), nullable=True)
    participantOrigin = Column("participant_origin", String(80), nullable=True)
    cvlW2scManifestJobRunID = Column('cvl_w2sc_manifest_job_run_id',
                                     Integer, ForeignKey("genomic_job_run.id"),
                                     nullable=True)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicSetMember, "before_insert", model_insert_listener)
event.listen(GenomicSetMember, "before_update", model_update_listener)
class GenomicJobRun(Base):
    """Genomic Job Run model.
    This model represents a 'run' of a genomics job,
    And tracks the results of a run."""
    __tablename__ = 'genomic_job_run'
    # Primary Key
    id = Column('id', Integer,
                primary_key=True,
                autoincrement=True,
                nullable=False)
    created = Column("created", UTCDateTime, nullable=True)
    modified = Column("modified", UTCDateTime, nullable=True)
    # Which job this run executed.
    jobId = Column('job_id', Enum(GenomicJob),
                   default=GenomicJob.UNSET, nullable=False)
    # Denormalized string copy of job_id (default mirrors enum UNSET).
    jobIdStr = Column('job_id_str', String(64), default="UNSET")
    startTime = Column('start_time', DateTime, nullable=False)
    endTime = Column('end_time', DateTime, nullable=True)
    # Lifecycle status of the run itself (defaults to RUNNING).
    runStatus = Column('run_status',
                       Enum(GenomicSubProcessStatus),
                       default=GenomicSubProcessStatus.RUNNING)
    # Outcome of the run once finished.
    runResult = Column('run_result',
                       Enum(GenomicSubProcessResult),
                       default=GenomicSubProcessResult.UNSET)
    runResultStr = Column('run_result_str', String(64), default="UNSET")
    resultMessage = Column('result_message', String(150), nullable=True)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicJobRun, 'before_insert', model_insert_listener)
event.listen(GenomicJobRun, 'before_update', model_update_listener)
class GenomicFileProcessed(Base):
    """Genomic File Processed model.
    This model represents the file(s) processed during a genomics run."""
    __tablename__ = 'genomic_file_processed'
    # Primary Key
    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column("created", UTCDateTime, nullable=True)
    modified = Column("modified", UTCDateTime, nullable=True)
    # The job run that processed this file.
    runId = Column('run_id', Integer,
                   ForeignKey('genomic_job_run.id'), nullable=False)
    startTime = Column('start_time', DateTime, nullable=False)
    endTime = Column('end_time', DateTime, nullable=True)
    # Link to the manifest-file record this processing belongs to.
    genomicManifestFileId = Column('genomic_manifest_file_id', Integer,
                                   ForeignKey("genomic_manifest_file.id"),
                                   nullable=True)
    # TODO: file_path, bucket_name, file_name, and upload_date to be removed
    # after genomic_manifest_file created, backfilled, and downstream partners notified.
    filePath = Column('file_path', String(255), nullable=False, index=True)
    bucketName = Column('bucket_name', String(128), nullable=False)
    fileName = Column('file_name', String(128), nullable=False)
    fileStatus = Column('file_status',
                        Enum(GenomicSubProcessStatus),
                        default=GenomicSubProcessStatus.QUEUED)
    fileResult = Column('file_result',
                        Enum(GenomicSubProcessResult),
                        default=GenomicSubProcessResult.UNSET)
    uploadDate = Column('upload_date', UTCDateTime, nullable=True)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicFileProcessed, 'before_insert', model_insert_listener)
event.listen(GenomicFileProcessed, 'before_update', model_update_listener)
class GenomicManifestFile(Base):
    """
    Genomic manifest file model.
    This model represents a manifest file.
    This includes both RDR and externally-generated manifests.
    """
    __tablename__ = 'genomic_manifest_file'
    id = Column('id', Integer, primary_key=True, autoincrement=True, nullable=False)
    created = Column("created", UTCDateTime, nullable=False)
    modified = Column("modified", UTCDateTime, nullable=False)
    uploadDate = Column('upload_date', UTCDateTime, nullable=True)
    manifestTypeId = Column('manifest_type_id', Enum(GenomicManifestTypes), nullable=True)
    # Denormalized string copy of manifest_type_id.
    manifestTypeIdStr = Column('manifest_type_id_str', String(64), nullable=True)
    filePath = Column('file_path', String(255), nullable=True, index=True)
    fileName = Column('file_name', String(255), nullable=True, index=True)
    bucketName = Column('bucket_name', String(128), nullable=True)
    recordCount = Column('record_count', Integer, nullable=False, default=0)
    rdrProcessingComplete = Column('rdr_processing_complete', SmallInteger, nullable=False, default=0)
    rdrProcessingCompleteDate = Column('rdr_processing_complete_date', UTCDateTime, nullable=True)
    # TODO: Deprecated via DA-1865, to be removed after `ignore_flag` backfilled
    ignore = Column('ignore', SmallInteger, nullable=False, default=0)
    # Replaces `ignore` DA-1865
    # NOTE(review): snake_case attribute name here, vs. camelCase
    # `ignoreFlag` on other models in this file.
    ignore_flag = Column('ignore_flag', SmallInteger, nullable=False, default=0)
    # Uniqueness is on (file_path, deprecated `ignore` column).
    __table_args__ = (UniqueConstraint('file_path', 'ignore', name='_file_path_ignore_uc'),)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicManifestFile, 'before_insert', model_insert_listener)
event.listen(GenomicManifestFile, 'before_update', model_update_listener)
class GenomicManifestFeedback(Base):
    """
    Genomic manifest feedback model.
    This model represents a relationship
    between two genomic_manifest_file records:
    the input file and the feedback file.
    """
    __tablename__ = 'genomic_manifest_feedback'
    id = Column('id', Integer, primary_key=True, autoincrement=True, nullable=False)
    created = Column("created", UTCDateTime, nullable=False)
    modified = Column("modified", UTCDateTime, nullable=False)
    # Foreign keys to genomic_manifest_file
    # Relates two manifests: the Input manifest and the Feedback manifest
    inputManifestFileId = Column("input_manifest_file_id", Integer,
                                 ForeignKey("genomic_manifest_file.id"), nullable=False)
    feedbackManifestFileId = Column("feedback_manifest_file_id", Integer,
                                    ForeignKey("genomic_manifest_file.id"), nullable=True)
    # Records RDR has received feedback for
    feedbackRecordCount = Column('feedback_record_count', Integer, nullable=False, default=0)
    # Once feedback_record_count = genomic_manifest_file.record_count
    # feedback_complete = 1 and a feedback manifest is generated, i.e. AW2F.
    feedbackComplete = Column('feedback_complete', SmallInteger, nullable=False, default=0)
    feedbackCompleteDate = Column('feedback_complete_date', UTCDateTime, nullable=True)
    # TODO: Deprecated via DA-1865, to be removed after `ignore_flag` backfilled
    ignore = Column('ignore', SmallInteger, nullable=False, default=0)
    # Replaces `ignore` DA-1865
    ignoreFlag = Column('ignore_flag', SmallInteger, nullable=False, default=0)
    # No explicit column-name argument: the DB column is named after the
    # attribute ('version').
    version = Column(Integer, nullable=False, default=0)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicManifestFeedback, 'before_insert', model_insert_listener)
event.listen(GenomicManifestFeedback, 'before_update', model_update_listener)
class GenomicAW1Raw(Base):
    """
    Raw text data from AW1 files

    Columns mirror the AW1 manifest verbatim as String(255) text; no
    typing/validation is applied at this layer.
    """
    __tablename__ = 'genomic_aw1_raw'
    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    # Auto-Timestamps
    created = Column('created', DateTime, nullable=True)
    modified = Column('modified', DateTime, nullable=True)
    # Source file and record bookkeeping (not part of the AW1 payload).
    file_path = Column('file_path', String(255), nullable=True, index=True)
    ignore_flag = Column('ignore_flag', SmallInteger, nullable=False, default=0)
    dev_note = Column('dev_note', String(255), nullable=True)
    # Raw AW1 data
    package_id = Column("package_id", String(255), nullable=True)
    biobankid_sample_id = Column("biobankid_sample_id", String(255), nullable=True)
    box_storageunit_id = Column("box_storageunit_id", String(255), nullable=True)
    box_id_plate_id = Column("box_id_plate_id", String(255), nullable=True)
    well_position = Column("well_position", String(255), nullable=True)
    sample_id = Column("sample_id", String(255), nullable=True, index=True)
    parent_sample_id = Column("parent_sample_id", String(255), nullable=True, index=True)
    collection_tube_id = Column("collection_tube_id", String(255), nullable=True, index=True)
    matrix_id = Column("matrix_id", String(255), nullable=True)
    collection_date = Column("collection_date", String(255), nullable=True)
    biobank_id = Column("biobank_id", String(255), nullable=True, index=True)
    sex_at_birth = Column("sex_at_birth", String(255), nullable=True)
    """Assigned sex at birth"""
    age = Column("age", String(255), nullable=True)
    ny_state = Column("ny_state", String(255), nullable=True)
    sample_type = Column("sample_type", String(255), nullable=True)
    treatments = Column("treatments", String(255), nullable=True)
    quantity = Column("quantity", String(255), nullable=True)
    total_concentration = Column("total_concentration", String(255), nullable=True)
    total_dna = Column("total_dna", String(255), nullable=True)
    visit_description = Column("visit_description", String(255), nullable=True)
    sample_source = Column("sample_source", String(255), nullable=True)
    study = Column("study", String(255), nullable=True)
    tracking_number = Column("tracking_number", String(255), nullable=True)
    contact = Column("contact", String(255), nullable=True)
    email = Column("email", String(255), nullable=True)
    study_pi = Column("study_pi", String(255), nullable=True)
    site_name = Column("site_name", String(255), nullable=True, index=True)
    test_name = Column("test_name", String(255), nullable=True, index=True)
    failure_mode = Column("failure_mode", String(255), nullable=True)
    failure_mode_desc = Column("failure_mode_desc", String(255), nullable=True)
    # DB column named after the attribute (no explicit name argument).
    genome_type = Column(String(80), nullable=True, index=True)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicAW1Raw, 'before_insert', model_insert_listener)
event.listen(GenomicAW1Raw, 'before_update', model_update_listener)
class GenomicAW2Raw(Base):
    """
    Raw text data from AW2 files

    Columns mirror the AW2 manifest verbatim as String text; DB column
    names default to the attribute names (no explicit name arguments
    in the payload section).
    """
    __tablename__ = 'genomic_aw2_raw'
    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    # Auto-Timestamps
    created = Column('created', DateTime, nullable=True)
    modified = Column('modified', DateTime, nullable=True)
    # Source file and record bookkeeping (not part of the AW2 payload).
    file_path = Column('file_path', String(255), nullable=True, index=True)
    ignore_flag = Column('ignore_flag', SmallInteger, nullable=False, default=0)
    dev_note = Column('dev_note', String(255), nullable=True)
    # Raw AW2 Data
    biobank_id = Column(String(255), nullable=True)
    sample_id = Column(String(255), nullable=True)
    biobankidsampleid = Column(String(255), nullable=True)
    lims_id = Column(String(255), nullable=True)
    mean_coverage = Column(String(255), nullable=True)
    genome_coverage = Column(String(255), nullable=True)
    aouhdr_coverage = Column(String(255), nullable=True)
    contamination = Column(String(255), nullable=True)
    sample_source = Column(String(255), nullable=True)
    mapped_reads_pct = Column(String(255), nullable=True)
    sex_concordance = Column(String(255), nullable=True)
    sex_ploidy = Column(String(255), nullable=True)
    aligned_q30_bases = Column(String(255), nullable=True)
    array_concordance = Column(String(255), nullable=True)
    processing_status = Column(String(255), nullable=True)
    notes = Column(String(255), nullable=True)
    chipwellbarcode = Column(String(255), nullable=True)
    call_rate = Column(String(255), nullable=True)
    genome_type = Column(String(80), nullable=True)
    pipeline_id = Column(String(255), nullable=True)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicAW2Raw, 'before_insert', model_insert_listener)
event.listen(GenomicAW2Raw, 'before_update', model_update_listener)
class GenomicAW3Raw(Base):
    """
    Raw data from AW3 files

    Columns mirror the AW3 manifest verbatim as String text; DB column
    names default to the attribute names.
    """
    __tablename__ = 'genomic_aw3_raw'
    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    # Auto-Timestamps
    created = Column('created', DateTime, nullable=True)
    modified = Column('modified', DateTime, nullable=True)
    # Source file and record bookkeeping (not part of the AW3 payload).
    file_path = Column(String(255), nullable=True, index=True)
    ignore_flag = Column(SmallInteger, nullable=False, default=0)
    dev_note = Column(String(255), nullable=True)
    genome_type = Column(String(255), nullable=True, index=True)
    # Raw AW3 Data
    chipwellbarcode = Column(String(255), nullable=True, index=True)
    biobank_id = Column(String(255), nullable=True, index=True)
    sample_id = Column(String(255), nullable=True, index=True)
    research_id = Column(String(255), nullable=True, index=True)
    biobankidsampleid = Column(String(255), nullable=True)
    sex_at_birth = Column(String(255), nullable=True)
    site_id = Column(String(255), nullable=True, index=True)
    callrate = Column(String(255), nullable=True)
    sex_concordance = Column(String(255), nullable=True)
    contamination = Column(String(255), nullable=True)
    processing_status = Column(String(255), nullable=True)
    mean_coverage = Column(String(255), nullable=True)
    sample_source = Column(String(255), nullable=True)
    pipeline_id = Column(String(255), nullable=True)
    mapped_reads_pct = Column(String(255), nullable=True)
    sex_ploidy = Column(String(255), nullable=True)
    ai_an = Column(String(255), nullable=True)
    blocklisted = Column(String(255), nullable=True, index=True)
    blocklisted_reason = Column(String(255), nullable=True)
    # Genotyping / sequencing artifact paths reported by the manifest.
    red_idat_path = Column(String(255), nullable=True)
    red_idat_md5_path = Column(String(255), nullable=True)
    green_idat_path = Column(String(255), nullable=True)
    green_idat_md5_path = Column(String(255), nullable=True)
    vcf_path = Column(String(255), nullable=True)
    vcf_index_path = Column(String(255), nullable=True)
    vcf_md5_path = Column(String(255), nullable=True)
    vcf_hf_path = Column(String(255), nullable=True)
    vcf_hf_index_path = Column(String(255), nullable=True)
    vcf_hf_md5_path = Column(String(255), nullable=True)
    cram_path = Column(String(255), nullable=True)
    cram_md5_path = Column(String(255), nullable=True)
    crai_path = Column(String(255), nullable=True)
    gvcf_path = Column(String(255), nullable=True)
    gvcf_md5_path = Column(String(255), nullable=True)
# Insert/update hooks from rdr_service.model.base -- presumably maintain
# the created/modified columns; confirm in model.base.
event.listen(GenomicAW3Raw, 'before_insert', model_insert_listener)
event.listen(GenomicAW3Raw, 'before_update', model_update_listener)
class GenomicAW4Raw(Base):
    """
    Raw data from AW4 files.

    Each AW4 manifest field is stored verbatim as a nullable String(255)
    so that ingestion never rejects rows with unexpected values; typed
    validation is not performed at this layer.
    """
    __tablename__ = 'genomic_aw4_raw'

    # Primary Key
    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)

    # Auto-Timestamps
    created = Column('created', DateTime, nullable=True)
    modified = Column('modified', DateTime, nullable=True)

    # Ingestion bookkeeping
    file_path = Column(String(255), nullable=True, index=True)
    ignore_flag = Column(SmallInteger, nullable=False, default=0)
    dev_note = Column(String(255), nullable=True)
    genome_type = Column(String(255), nullable=True, index=True)

    # Raw AW4 Data
    biobank_id = Column(String(255), nullable=True, index=True)
    sample_id = Column(String(255), nullable=True, index=True)
    sex_at_birth = Column(String(255), nullable=True)
    site_id = Column(String(255), nullable=True, index=True)
    red_idat_path = Column(String(255), nullable=True)
    red_idat_md5_path = Column(String(255), nullable=True)
    green_idat_path = Column(String(255), nullable=True)
    green_idat_md5_path = Column(String(255), nullable=True)
    vcf_path = Column(String(255), nullable=True)
    vcf_index_path = Column(String(255), nullable=True)
    vcf_hf_path = Column(String(255), nullable=True)
    vcf_hf_md5_path = Column(String(255), nullable=True)
    vcf_hf_index_path = Column(String(255), nullable=True)
    vcf_raw_path = Column(String(255), nullable=True)
    vcf_raw_md5_path = Column(String(255), nullable=True)
    vcf_raw_index_path = Column(String(255), nullable=True)
    gvcf_path = Column(String(255), nullable=True)
    gvcf_md5_path = Column(String(255), nullable=True)
    cram_path = Column(String(255), nullable=True)
    cram_md5_path = Column(String(255), nullable=True)
    crai_path = Column(String(255), nullable=True)
    research_id = Column(String(255), nullable=True, index=True)
    qc_status = Column(String(255), nullable=True)
    drc_sex_concordance = Column(String(255), nullable=True)
    drc_call_rate = Column(String(255), nullable=True)
    drc_contamination = Column(String(255), nullable=True)
    drc_mean_coverage = Column(String(255), nullable=True)
    drc_fp_concordance = Column(String(255), nullable=True)
    pass_to_research_pipeline = Column(String(255), nullable=True)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicAW4Raw, 'before_insert', model_insert_listener)
event.listen(GenomicAW4Raw, 'before_update', model_update_listener)
class GenomicGCValidationMetrics(Base):
    """Genomic Sequencing Metrics model.

    This is the data ingested from Genome Centers' validation result
    metrics files.  The many ``*Received`` / ``*Deleted`` SmallInteger
    pairs are 0/1 flags recording whether the corresponding data file
    was reconciled in the bucket (received) or removed (deleted); the
    matching ``*Path`` columns hold the file's storage path.
    """
    __tablename__ = 'genomic_gc_validation_metrics'

    # Primary Key
    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    genomicSetMemberId = Column('genomic_set_member_id',
                                ForeignKey('genomic_set_member.id'),
                                nullable=True)
    genomicFileProcessedId = Column('genomic_file_processed_id',
                                    ForeignKey('genomic_file_processed.id'))

    # Auto-Timestamps
    created = Column('created', DateTime, nullable=True)
    modified = Column('modified', DateTime, nullable=True)

    # Ingested Data (kept as strings as delivered by the GCs)
    limsId = Column('lims_id', String(80), nullable=True)
    chipwellbarcode = Column('chipwellbarcode', String(80), nullable=True)
    callRate = Column('call_rate', String(10), nullable=True)
    meanCoverage = Column('mean_coverage', String(10), nullable=True)
    genomeCoverage = Column('genome_coverage', String(10), nullable=True)
    aouHdrCoverage = Column('aou_hdr_coverage', String(10), nullable=True)
    contamination = Column('contamination', String(10), nullable=True)
    mappedReadsPct = Column('mapped_reads_pct', String(10), nullable=True)
    sexConcordance = Column('sex_concordance', String(10), nullable=True)
    sexPloidy = Column('sex_ploidy', String(10), nullable=True)
    alignedQ30Bases = Column('aligned_q30_bases', BigInteger, nullable=True)
    arrayConcordance = Column('array_concordance', String(10), nullable=True)
    processingStatus = Column('processing_status', String(15), nullable=True)
    notes = Column('notes', String(128), nullable=True)
    siteId = Column('site_id', String(80), nullable=True)
    drcSexConcordance = Column('drc_sex_concordance', String(255), nullable=True)
    drcContamination = Column('drc_contamination', String(255), nullable=True)
    drcCallRate = Column('drc_call_rate', String(255), nullable=True)
    drcMeanCoverage = Column('drc_mean_coverage', String(255), nullable=True)
    drcFpConcordance = Column('drc_fp_concordance', String(255), nullable=True)

    # Genotyping Data (Array) reconciliation
    idatRedReceived = Column('idat_red_received', SmallInteger, nullable=False, default=0)
    idatRedDeleted = Column('idat_red_deleted', SmallInteger, nullable=False, default=0)
    idatRedPath = Column('idat_red_path', String(255), nullable=True)
    idatGreenReceived = Column('idat_green_received', SmallInteger, nullable=False, default=0)
    idatGreenDeleted = Column('idat_green_deleted', SmallInteger, nullable=False, default=0)
    idatGreenPath = Column('idat_green_path', String(255), nullable=True)
    idatRedMd5Received = Column('idat_red_md5_received', SmallInteger, nullable=False, default=0)
    idatRedMd5Deleted = Column('idat_red_md5_deleted', SmallInteger, nullable=False, default=0)
    idatRedMd5Path = Column('idat_red_md5_path', String(255), nullable=True)
    idatGreenMd5Received = Column('idat_green_md5_received', SmallInteger, nullable=False, default=0)
    idatGreenMd5Deleted = Column('idat_green_md5_deleted', SmallInteger, nullable=False, default=0)
    idatGreenMd5Path = Column('idat_green_md5_path', String(255), nullable=True)
    vcfReceived = Column('vcf_received', SmallInteger, nullable=False, default=0)
    vcfDeleted = Column('vcf_deleted', SmallInteger, nullable=False, default=0)
    vcfPath = Column('vcf_path', String(255), nullable=True)
    vcfMd5Received = Column('vcf_md5_received', SmallInteger, nullable=False, default=0)
    vcfMd5Deleted = Column('vcf_md5_deleted', SmallInteger, nullable=False, default=0)
    vcfMd5Path = Column('vcf_md5_path', String(255), nullable=True)
    vcfTbiReceived = Column('vcf_tbi_received', SmallInteger, nullable=False, default=0)
    vcfTbiDeleted = Column('vcf_tbi_deleted', SmallInteger, nullable=False, default=0)
    vcfTbiPath = Column('vcf_tbi_path', String(255), nullable=True)

    # Sequencing Data (WGS) reconciliation
    # Single sample VCF: Hard-filtered for clinical purpose
    hfVcfReceived = Column('hf_vcf_received', SmallInteger, nullable=False, default=0)
    hfVcfDeleted = Column('hf_vcf_deleted', SmallInteger, nullable=False, default=0)
    hfVcfPath = Column('hf_vcf_path', String(255), nullable=True)
    hfVcfTbiReceived = Column('hf_vcf_tbi_received', SmallInteger, nullable=False, default=0)
    hfVcfTbiDeleted = Column('hf_vcf_tbi_deleted', SmallInteger, nullable=False, default=0)
    hfVcfTbiPath = Column('hf_vcf_tbi_path', String(255), nullable=True)
    hfVcfMd5Received = Column('hf_vcf_md5_received', SmallInteger, nullable=False, default=0)
    hfVcfMd5Deleted = Column('hf_vcf_md5_deleted', SmallInteger, nullable=False, default=0)
    hfVcfMd5Path = Column('hf_vcf_md5_path', String(255), nullable=True)
    # Single sample VCF: Raw for research purpose
    rawVcfReceived = Column('raw_vcf_received', SmallInteger, nullable=False, default=0)
    rawVcfDeleted = Column('raw_vcf_deleted', SmallInteger, nullable=False, default=0)
    rawVcfPath = Column('raw_vcf_path', String(255), nullable=True)
    rawVcfTbiReceived = Column('raw_vcf_tbi_received', SmallInteger, nullable=False, default=0)
    rawVcfTbiDeleted = Column('raw_vcf_tbi_deleted', SmallInteger, nullable=False, default=0)
    rawVcfTbiPath = Column('raw_vcf_tbi_path', String(255), nullable=True)
    rawVcfMd5Received = Column('raw_vcf_md5_received', SmallInteger, nullable=False, default=0)
    rawVcfMd5Deleted = Column('raw_vcf_md5_deleted', SmallInteger, nullable=False, default=0)
    rawVcfMd5Path = Column('raw_vcf_md5_path', String(255), nullable=True)
    # CRAMs and CRAIs
    cramReceived = Column('cram_received', SmallInteger, nullable=False, default=0)
    cramDeleted = Column('cram_deleted', SmallInteger, nullable=False, default=0)
    cramPath = Column('cram_path', String(255), nullable=True)
    cramMd5Received = Column('cram_md5_received', SmallInteger, nullable=False, default=0)
    cramMd5Deleted = Column('cram_md5_deleted', SmallInteger, nullable=False, default=0)
    cramMd5Path = Column('cram_md5_path', String(255), nullable=True)
    craiReceived = Column('crai_received', SmallInteger, nullable=False, default=0)
    craiDeleted = Column('crai_deleted', SmallInteger, nullable=False, default=0)
    craiPath = Column('crai_path', String(255), nullable=True)
    gvcfReceived = Column('gvcf_received', SmallInteger, nullable=False, default=0)
    gvcfDeleted = Column('gvcf_deleted', SmallInteger, nullable=False, default=0)
    # NOTE: gvcf_path is String(512), unlike the other String(255) paths.
    gvcfPath = Column('gvcf_path', String(512), nullable=True)
    gvcfMd5Received = Column('gvcf_md5_received', SmallInteger, nullable=False, default=0)
    gvcfMd5Deleted = Column('gvcf_md5_deleted', SmallInteger, nullable=False, default=0)
    gvcfMd5Path = Column('gvcf_md5_path', String(255), nullable=True)

    # Ignore Record
    ignoreFlag = Column('ignore_flag', SmallInteger, nullable=True, default=0)
    devNote = Column('dev_note', String(255), nullable=True)

    # Contamination category
    # NOTE(review): the column's Enum is GenomicContaminationCategory but the
    # default comes from GenomicSubProcessResult.UNSET -- looks like a
    # cross-enum default; confirm both enums share an UNSET member/value.
    contaminationCategory = Column('contamination_category',
                                   Enum(GenomicContaminationCategory),
                                   default=GenomicSubProcessResult.UNSET)
    contaminationCategoryStr = Column('contamination_category_str', String(64), default="UNSET")
    pipelineId = Column('pipeline_id', String(255), nullable=True)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicGCValidationMetrics, 'before_insert', model_insert_listener)
event.listen(GenomicGCValidationMetrics, 'before_update', model_update_listener)
class GenomicSampleContamination(Base):
    """A list of samples that have been found to be contaminated, with
    information on what stage of the process they have been added to the
    table (``failedInJob``)."""
    __tablename__ = 'genomic_sample_contamination'

    # Primary Key
    id = Column('id', Integer, primary_key=True, autoincrement=True, nullable=False)

    # Auto-Timestamps
    created = Column('created', DateTime, nullable=True)
    modified = Column('modified', DateTime, nullable=True)

    # The contaminated stored sample.
    sampleId = Column('sample_id', ForeignKey(BiobankStoredSample.biobankStoredSampleId), nullable=False)
    # The pipeline job in which the contamination was detected.
    failedInJob = Column('failed_in_job', Enum(GenomicJob), nullable=False)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicSampleContamination, 'before_insert', model_insert_listener)
event.listen(GenomicSampleContamination, 'before_update', model_update_listener)
class GenomicIncident(Base):
    """
    An incident occurring during processing of genomic records.

    Links back (via FKs) to the job run, processed file, set member and
    validation-metrics record involved, and tracks Slack/email
    notifications sent about the incident.
    """
    __tablename__ = 'genomic_incident'

    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column('created', DateTime)
    modified = Column('modified', DateTime)
    ignore_flag = Column(SmallInteger, nullable=False, default=0)
    dev_note = Column(String(255))
    # Incident classification and lifecycle.
    code = Column(String(80), default=GenomicIncidentCode.UNSET.name)
    message = Column(String(512))
    status = Column(String(80), default=GenomicIncidentStatus.OPEN.name)
    # Notification tracking (0/1 flags plus timestamps).
    slack_notification = Column(SmallInteger, nullable=False, default=0)
    slack_notification_date = Column(DateTime, nullable=True)
    # Provenance of the incident.
    source_job_run_id = Column(Integer, ForeignKey("genomic_job_run.id"))
    source_file_processed_id = Column(Integer, ForeignKey("genomic_file_processed.id"))
    audit_job_run_id = Column(Integer, ForeignKey("genomic_job_run.id"))
    repair_job_run_id = Column(Integer, ForeignKey("genomic_job_run.id"))
    genomic_set_member_id = Column(Integer, ForeignKey("genomic_set_member.id"))
    gc_validation_metrics_id = Column(Integer, ForeignKey("genomic_gc_validation_metrics.id"))
    # Identifiers copied in as strings for flexible matching/searching.
    participant_id = Column(String(128), index=True)
    biobank_id = Column(String(128), index=True)
    sample_id = Column(String(80), index=True)
    collection_tube_id = Column(String(80), index=True)
    data_file_path = Column(String(512))
    submitted_gc_site_id = Column(String(128), nullable=True)
    email_notification_sent = Column(SmallInteger, nullable=True, default=0)
    email_notification_sent_date = Column(DateTime, nullable=True)
    manifest_file_name = Column(String(512), nullable=True)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicIncident, 'before_insert', model_insert_listener)
event.listen(GenomicIncident, 'before_update', model_update_listener)
class GenomicCloudRequests(Base):
    """
    Used for capturing cloud request payloads via Google Cloud Functions.

    Stores the full incoming payload as JSON plus the routing metadata
    (topic, api_route, task) and file location (bucket_name, file_path).
    """
    __tablename__ = 'genomic_cloud_requests'

    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime)
    modified = Column(DateTime)
    # Full request payload, stored verbatim.
    event_payload = Column(JSON, nullable=False)
    topic = Column(String(255), nullable=False)
    api_route = Column(String(255), nullable=False)
    file_path = Column(String(255), nullable=False)
    task = Column(String(255), nullable=False)
    bucket_name = Column(String(255), nullable=False)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicCloudRequests, 'before_insert', model_insert_listener)
event.listen(GenomicCloudRequests, 'before_update', model_update_listener)
class GenomicMemberReportState(Base):
    """
    Used for maintaining a one-to-many relationship from GenomicSetMember
    based on multiple report states (one row per module/report state).
    """
    __tablename__ = 'genomic_member_report_state'

    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    genomic_set_member_id = Column(ForeignKey('genomic_set_member.id'), nullable=False)
    # Enum value plus a denormalized string copy of the same state.
    genomic_report_state = Column(Enum(GenomicReportState), default=GenomicReportState.UNSET)
    genomic_report_state_str = Column(String(64), default="UNSET")
    participant_id = Column(Integer, ForeignKey("participant.participant_id"), nullable=True)
    created = Column(DateTime)
    modified = Column(DateTime)
    # Which genomic module this report state belongs to.
    module = Column(String(80), nullable=False)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicMemberReportState, 'before_insert', model_insert_listener)
event.listen(GenomicMemberReportState, 'before_update', model_update_listener)
class GenomicInformingLoop(Base):
    """
    Used for maintaining a normalized value set of
    informing_loop_decision events ingested from MessageBrokerEventData.
    """
    __tablename__ = 'genomic_informing_loop'

    id = Column('id', Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime, nullable=True)
    modified = Column(DateTime, nullable=True)
    # Back-reference to the originating message-broker record.
    message_record_id = Column(Integer, nullable=True)
    participant_id = Column(Integer, ForeignKey("participant.participant_id"), nullable=False)
    event_type = Column(String(256), nullable=False)
    # Microsecond-precision UTC timestamp from the event source.
    event_authored_time = Column(UTCDateTime6)
    module_type = Column(String(128))
    decision_value = Column(String(128))


# Register ORM lifecycle listeners for this model.
event.listen(GenomicInformingLoop, 'before_insert', model_insert_listener)
event.listen(GenomicInformingLoop, 'before_update', model_update_listener)
class GenomicResultViewed(Base):
    """
    Used for maintaining a normalized value set of
    result_viewed events ingested from MessageBrokerEventData.
    """
    __tablename__ = 'genomic_result_viewed'

    id = Column(Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime)
    modified = Column(DateTime)
    # Back-reference to the originating message-broker record.
    message_record_id = Column(Integer, nullable=True)
    participant_id = Column(Integer, ForeignKey("participant.participant_id"), nullable=False)
    event_type = Column(String(256), nullable=False)
    event_authored_time = Column(UTCDateTime6)
    module_type = Column(String(128))
    # First and most recent times the participant viewed the result.
    first_viewed = Column(UTCDateTime6)
    last_viewed = Column(UTCDateTime6)


# Register ORM lifecycle listeners for this model.
event.listen(GenomicResultViewed, 'before_insert', model_insert_listener)
event.listen(GenomicResultViewed, 'before_update', model_update_listener)
class GenomicGcDataFile(Base):
    """
    Used for tracking genomic data files produced by the GCs.
    """
    __tablename__ = 'genomic_gc_data_file'

    id = Column(Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime)
    modified = Column(DateTime)
    file_path = Column(String(255), nullable=False, index=True)
    gc_site_id = Column(String(64), nullable=False, index=True)
    bucket_name = Column(String(128), nullable=False, index=True)
    file_prefix = Column(String(128), nullable=True)
    file_name = Column(String(128), nullable=False)
    file_type = Column(String(128), nullable=False, index=True)  # everything after the first '.'
    # reconciliation process uses the identifier_* fields to match metrics records
    identifier_type = Column(String(128), index=True)  # sample_id for WGS; chipwellbarcode for Array
    identifier_value = Column(String(128), index=True)  # value to match the metric record
    ignore_flag = Column('ignore_flag', SmallInteger, nullable=False, default=0)  # 0 is no, 1 is yes


# Register ORM lifecycle listeners for this model.
event.listen(GenomicGcDataFile, 'before_insert', model_insert_listener)
event.listen(GenomicGcDataFile, 'before_update', model_update_listener)
class GenomicGcDataFileMissing(Base):
    """
    Used for tracking missing genomic data files produced by the GCs.
    """
    __tablename__ = 'genomic_gc_data_file_missing'

    id = Column(Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime)
    modified = Column(DateTime)
    gc_site_id = Column(String(64), nullable=False, index=True)
    file_type = Column(String(128), nullable=False, index=True)  # gc data file extension
    # Job run that detected the missing file.
    run_id = Column(Integer, ForeignKey("genomic_job_run.id"), nullable=False)
    gc_validation_metric_id = Column(Integer, ForeignKey("genomic_gc_validation_metrics.id"), nullable=False)
    resolved = Column(SmallInteger, nullable=False, default=0)  # 0 is no, 1 is yes
    resolved_date = Column(DateTime, nullable=True)  # when the missing file was resolved
    ignore_flag = Column(SmallInteger, nullable=False, default=0)  # 0 is no, 1 is yes


# Register ORM lifecycle listeners for this model.
event.listen(GenomicGcDataFileMissing, 'before_insert', model_insert_listener)
event.listen(GenomicGcDataFileMissing, 'before_update', model_update_listener)
class GcDataFileStaging(Base):
    """
    Staging table for the "GC data file reconciliation to table" job.
    Cleared and reloaded on every job run, which is why it intentionally
    has no created/modified timestamps or lifecycle listeners.
    """
    __tablename__ = 'gc_data_file_staging'

    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    bucket_name = Column(String(128), nullable=False, index=True)
    file_path = Column(String(255), nullable=False, index=True)
class GemToGpMigration(Base):
    """
    Used for storing GEM to GP migration records.
    """
    __tablename__ = 'gem_to_gp_migration'

    id = Column(Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime)
    modified = Column(DateTime)
    ignore_flag = Column(SmallInteger, nullable=False, default=0)  # 0 is no, 1 is yes
    dev_note = Column(String(255), nullable=True)
    file_path = Column(String(255), nullable=True, index=True)
    # Job run that produced this migration record.
    run_id = Column(Integer, ForeignKey("genomic_job_run.id"))

    # Fields sent to GP
    participant_id = Column(Integer, nullable=True, index=True)
    informing_loop_status = Column(String(64), nullable=True)
    informing_loop_authored = Column(DateTime, index=True)
    ancestry_traits_response = Column(String(64), nullable=True, index=True)


# Register ORM lifecycle listeners for this model.
event.listen(GemToGpMigration, 'before_insert', model_insert_listener)
event.listen(GemToGpMigration, 'before_update', model_update_listener)
class UserEventMetrics(Base):
    """
    Used for storing GHR3 user event metrics ingested from files.
    """
    __tablename__ = 'user_event_metrics'

    id = Column(Integer,
                primary_key=True, autoincrement=True, nullable=False)
    created = Column(DateTime)
    modified = Column(DateTime)
    participant_id = Column(Integer, ForeignKey("participant.participant_id"), nullable=False, index=True)
    # Event attributes as exported (created_at kept as a raw string).
    created_at = Column(String(255))
    event_name = Column(String(512))
    device = Column(String(255))
    operating_system = Column(String(255))
    browser = Column(String(255))
    # Source file and the job runs that ingested/reconciled it.
    file_path = Column(String(512), index=True)
    run_id = Column(Integer, ForeignKey("genomic_job_run.id"), nullable=False)
    ignore_flag = Column(SmallInteger, nullable=False, default=0)
    reconcile_job_run_id = Column(Integer, ForeignKey("genomic_job_run.id"), nullable=True)


# Register ORM lifecycle listeners for this model.
event.listen(UserEventMetrics, 'before_insert', model_insert_listener)
event.listen(UserEventMetrics, 'before_update', model_update_listener)
| |
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import time
import httplib2
from oslo_serialization import jsonutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.utils import execute
# 5 KiB of '*' characters used as dummy image payload.
TEST_IMAGE_DATA = '*' * 5 * units.Ki
# Default v1 image metadata for test images.
TEST_IMAGE_META = {
    'name': 'test_image',
    'is_public': False,
    'disk_format': 'raw',
    'container_format': 'ovf',
}
class TestScrubber(functional.FunctionalTest):

    """Test that delayed_delete works and the scrubber deletes"""

    def _send_http_request(self, path, method, body=None):
        """Issue a v1 images API request with default test-image headers.

        Args:
            path: full URL of the images endpoint or image resource.
            method: HTTP method ('POST', 'DELETE', 'HEAD', ...).
            body: optional request body (image data for POST).

        Returns:
            (response, content) tuple from httplib2.
        """
        headers = {
            'x-image-meta-name': 'test_image',
            'x-image-meta-is_public': 'true',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'content-type': 'application/octet-stream'
        }
        return httplib2.Http().request(path, method, body, headers)

    def test_delayed_delete(self):
        """
        test that images don't get deleted immediately and that the scrubber
        scrubs them
        """
        self.cleanup()
        self.start_servers(delayed_delete=True, daemon=True,
                           metadata_encryption_key='')

        # Create an image.
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        response, content = self._send_http_request(path, 'POST', body='XXX')
        self.assertEqual(201, response.status)
        image = jsonutils.loads(content)['image']
        self.assertEqual('active', image['status'])

        # Delete it; with delayed_delete it must go to 'pending_delete',
        # not disappear immediately.
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image['id'])
        response, content = self._send_http_request(path, 'DELETE')
        self.assertEqual(200, response.status)
        response, content = self._send_http_request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual('pending_delete', response['x-image-meta-status'])

        # The daemonized scrubber should eventually remove it.
        self.wait_for_scrub(path)
        self.stop_servers()

    def test_delayed_delete_with_trustedauth_registry(self):
        """
        test that images don't get deleted immediately and that the scrubber
        scrubs them when registry is operating in trustedauth mode
        """
        self.cleanup()
        self.api_server.deployment_flavor = 'noauth'
        self.registry_server.deployment_flavor = 'trusted-auth'
        self.start_servers(delayed_delete=True, daemon=True,
                           metadata_encryption_key='',
                           send_identity_headers=True)

        # Identity headers required by the trusted-auth registry.
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': 'deae8923-075d-4287-924b-840fb2644874',
            'X-Roles': 'admin',
        }
        headers = {
            'x-image-meta-name': 'test_image',
            'x-image-meta-is_public': 'true',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'content-type': 'application/octet-stream',
        }
        headers.update(base_headers)

        # Create an image, sending the identity headers explicitly.
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', body='XXX',
                                         headers=headers)
        self.assertEqual(201, response.status)
        image = jsonutils.loads(content)['image']
        self.assertEqual('active', image['status'])
        image_id = image['id']

        # Delete and verify it is queued for delayed delete.
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE', headers=base_headers)
        self.assertEqual(200, response.status)
        response, content = http.request(path, 'HEAD', headers=base_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('pending_delete', response['x-image-meta-status'])

        self.wait_for_scrub(path, headers=base_headers)
        self.stop_servers()

    def test_scrubber_app(self):
        """
        test that the glance-scrubber script runs successfully when not in
        daemon mode
        """
        self.cleanup()
        self.start_servers(delayed_delete=True, daemon=False,
                           metadata_encryption_key='')

        # Create then delete an image so one entry is pending delete.
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        response, content = self._send_http_request(path, 'POST', body='XXX')
        self.assertEqual(201, response.status)
        image = jsonutils.loads(content)['image']
        self.assertEqual('active', image['status'])

        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image['id'])
        response, content = self._send_http_request(path, 'DELETE')
        self.assertEqual(200, response.status)

        response, content = self._send_http_request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual('pending_delete', response['x-image-meta-status'])

        # wait for the scrub time on the image to pass
        time.sleep(self.api_server.scrub_time)

        # scrub images and make sure they get deleted
        exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
        cmd = ("%s --config-file %s" %
               (exe_cmd, self.scrubber_daemon.conf_file_name))
        exitcode, out, err = execute(cmd, raise_error=False)
        self.assertEqual(0, exitcode)

        self.wait_for_scrub(path)
        self.stop_servers()

    def test_scrubber_app_with_trustedauth_registry(self):
        """
        test that the glance-scrubber script runs successfully when not in
        daemon mode and with a registry that operates in trustedauth mode
        """
        self.cleanup()
        self.api_server.deployment_flavor = 'noauth'
        self.registry_server.deployment_flavor = 'trusted-auth'
        self.start_servers(delayed_delete=True, daemon=False,
                           metadata_encryption_key='',
                           send_identity_headers=True)

        # Identity headers required by the trusted-auth registry.
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': 'deae8923-075d-4287-924b-840fb2644874',
            'X-Roles': 'admin',
        }
        headers = {
            'x-image-meta-name': 'test_image',
            'x-image-meta-is_public': 'true',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'content-type': 'application/octet-stream',
        }
        headers.update(base_headers)

        # Create then delete an image so one entry is pending delete.
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', body='XXX',
                                         headers=headers)
        self.assertEqual(201, response.status)
        image = jsonutils.loads(content)['image']
        self.assertEqual('active', image['status'])
        image_id = image['id']

        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE', headers=base_headers)
        self.assertEqual(200, response.status)

        response, content = http.request(path, 'HEAD', headers=base_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('pending_delete', response['x-image-meta-status'])

        # wait for the scrub time on the image to pass
        time.sleep(self.api_server.scrub_time)

        # scrub images and make sure they get deleted
        exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
        cmd = ("%s --config-file %s" %
               (exe_cmd, self.scrubber_daemon.conf_file_name))
        exitcode, out, err = execute(cmd, raise_error=False)
        self.assertEqual(0, exitcode)

        self.wait_for_scrub(path, headers=base_headers)
        self.stop_servers()

    def test_scrubber_delete_handles_exception(self):
        """
        Test that the scrubber handles the case where an
        exception occurs when _delete() is called. The scrubber
        should not write out queue files in this case.
        """

        # Start servers.
        self.cleanup()
        self.start_servers(delayed_delete=True, daemon=False,
                           default_store='file')

        # Check that we are using a file backend.
        self.assertEqual(self.api_server.default_store, 'file')

        # add an image
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        response, content = self._send_http_request(path, 'POST', body='XXX')
        self.assertEqual(201, response.status)
        image = jsonutils.loads(content)['image']
        self.assertEqual('active', image['status'])

        # delete the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image['id'])
        response, content = self._send_http_request(path, 'DELETE')
        self.assertEqual(200, response.status)

        # ensure the image is marked pending delete
        response, content = self._send_http_request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual('pending_delete', response['x-image-meta-status'])

        # Remove the file from the backend so the scrubber's _delete()
        # raises when it tries to scrub this image.
        file_path = os.path.join(self.api_server.image_dir, image['id'])
        os.remove(file_path)

        # Wait for the scrub time on the image to pass
        time.sleep(self.api_server.scrub_time)

        # run the scrubber app, and ensure it doesn't fall over
        exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
        cmd = ("%s --config-file %s" %
               (exe_cmd, self.scrubber_daemon.conf_file_name))
        exitcode, out, err = execute(cmd, raise_error=False)
        self.assertEqual(0, exitcode)

        self.wait_for_scrub(path)
        self.stop_servers()

    def wait_for_scrub(self, path, headers=None):
        """
        NOTE(jkoelker) The build servers sometimes take longer than 15 seconds
        to scrub. Give it up to 5 min, checking every 15 seconds.
        When/if it flips to deleted, bail immediately.
        """
        http = httplib2.Http()
        wait_for = 300    # seconds
        check_every = 15  # seconds
        # BUGFIX: use floor division -- on Python 3 (six.moves.range is the
        # builtin range) `wait_for / check_every` is a float and
        # range(float) raises TypeError.
        for _ in range(wait_for // check_every):
            time.sleep(check_every)

            response, content = http.request(path, 'HEAD', headers=headers)
            if (response['x-image-meta-status'] == 'deleted' and
                    response['x-image-meta-deleted'] == 'True'):
                break
        else:
            self.fail('image was never scrubbed')
| |
import re
from nltk import tokenize
import wikipedia
from phue import Group
from .process_input import *
# from webcolors import name_to_hex
# (TODO) Figure out a better way to connect with Bridge each time
def weatherAction(message, nlp, owm):
    """Makes the appropriate calls to the OWM API to answer weather queries.

    Args:
        message (str): An incoming text message.
        nlp: Instance of NLProcessor class
        owm: Instance of OWM API object

    Returns:
        str: A message answering the weather query, or a friendly error
        message if the location could not be resolved or the API call
        failed.
    """
    # create the spacy doc object
    doc = nlp(message)
    # get all GPE (geopolitical entity) mentions
    location_mentions = [ent.text for ent in doc.ents if ent.label_ == 'GPE']
    # attempt to join city/state/country mentions, e.g. "Toronto, Canada"
    location = ', '.join(location_mentions)
    try:
        # this object contains all the methods to get to the data
        observation = owm.weather_at_place(location)
        # get the weather and locations objects
        w, l = observation.get_weather(), observation.get_location()
        # get location name (according to owm)
        city = l.get_name()
        # extract the data to return
        wind_speed = str(w.get_wind()['speed'])
        temp = str(w.get_temperature('celsius')['temp'])
        status = str(w.get_detailed_status())
        answer = '''{} in {}, with a temperature of {}C and winds {}km/h.'''.format(status.title(), city, temp, wind_speed)
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate; any OWM/lookup failure collapses to a
        # single friendly error message (typo "capitlization" also fixed).
        answer = "Request cannot be completed. Try 'weather Toronto, Canada (proper capitalization of locations helps me identify them!)'"
    return answer
def lightsAction(message, philips_bridge):
    """Makes the appropriate calls to the phue API for changing light settings based on message and
    generates a response.

    Args:
        message (str): An incoming text message.
        philips_bridge: Instance of phue API bridge object.

    Returns:
        A message indicating what action was taken with the phue API.
    """
    # set default answer to error message
    answer = "Something went wrong..."
    # by default, set lights to all lights
    lights = philips_bridge.lights
    # get the name of all rooms (Hue calls these 'groups')
    groups = philips_bridge.get_group()
    room_names = [groups[key]['name'] for key in groups]
    # look for room-specific mentions in the sms. If room name mentioned
    # set lights equal to all lights in this group
    mentioned_room = ''
    for room in room_names:
        # escape the room name: user-defined names may contain regex
        # metacharacters which would otherwise break (or mis-match) the search
        if re.search(re.escape(room.lower()), message):
            mentioned_room = room.lower() + ' '
            lights = Group(philips_bridge, room).lights
    # use regex and cascading rules to determine action to take with lights
    # 1) Setting lights to a certain % intensity
    # Example text: 'dim bedroom lights', 'set lights to 50%'
    if re.search(r"%|\bpercent\b|\bdim\b", message):
        # if the word dim is mentioned, set to 15%
        if re.search('dim', message):
            intensity = '15'
        # else find the value that directly precedes '%' or 'percent'
        else:
            matches = re.findall(r'(\w+)\s*(%|percent)\s*', message)
            # fall back to None when no value precedes '%'/'percent';
            # int(None) below raises and is answered with the error message
            intensity = matches[0][0] if matches else None
        try:
            for l in lights:
                l.on = True
                # normalize % intensity to a value between 0-254
                l.brightness = int(int(intensity)/100*254)
            answer = "Setting {}lights to {}%...\U0001F4A1".format(mentioned_room, intensity)
        except Exception:
            answer = 'Something went wrong while trying to change your lights brightness...'
    # 2) Turning lights off
    # Example text: 'turn off the bedroom lights', 'turn off the lights'
    elif re.search(r"\boff\b", message):
        try:
            for l in lights:
                l.on = False
            answer = "Turning {}lights off...\U0001F4A1".format(mentioned_room)
        except Exception:
            answer = 'Something went wrong while trying to turn your lights off...'
    # 3) Turning lights on
    # Example text: 'turn on the bedroom lights', 'turn on the lights'
    elif re.search(r"\bon\b", message):
        try:
            for l in lights:
                l.on = True
                l.brightness = 254
            answer = "Turning {}lights on...\U0001F4A1".format(mentioned_room)
        except Exception:
            answer = 'Something went wrong while trying to turn your lights on...'
    # 4) Warming or cooling lights
    # Example text: 'Warm the bedroom lights', 'Cool the lights'
    elif re.search(r"\bwarm\b|\bcool\b", message, re.IGNORECASE):
        warmOrCool = ''
        # check if warm or cool was mentioned
        if re.search(r'warm', message, re.IGNORECASE):
            warmOrCool = 'Warming'
        elif re.search(r'cool', message, re.IGNORECASE):
            warmOrCool = 'Cooling'
        # turn on and then warm or cool lights accordingly
        try:
            for l in lights:
                l.on = True
                # cool or warm lights
                if warmOrCool == 'Warming':
                    l.colortemp_k = 2000
                    # additionally set lights to 60% brightness
                    l.brightness = 152
                elif warmOrCool == 'Cooling':
                    l.colortemp_k = 6500
                    # additionally set lights to 80% brightness
                    l.brightness = 254
            answer = "{} {}lights...\U0001F4A1".format(warmOrCool, mentioned_room)
        except Exception:
            answer = 'Something went wrong while trying to warm or cool your lights...'
    # 5) Change the lights color
    # NOTE THIS IS A BIT OF A HACK, NEEDS TO BE IMPROVED
    # Example text: 'Turn the lights blue', 'Turn the bedroom lights red'
    else:
        # tokenize
        tokens = tokenize.wordpunct_tokenize(message)
        # filter stopwords
        tokens_filtered = remove_stopwords(tokens)
        # join filtered message
        message_filtered = ' '.join(tokens_filtered)
        print("(Highly) processed input: ", message_filtered)
        # find the mention of a color name; empty string when no match so the
        # dict lookup below raises and is answered with the error message
        color_matches = re.findall(r'\s*lights?\s*(\w+)', message_filtered)
        color = color_matches[0] if color_matches else ''
        # THIS IS A TEMPORARY HACK: map color names to Hue hue values
        colors = {
            'blue': 40000,
            'red': 100,
            'green': 30000,
            'orange': 4000,
            'pink': 60000,
            'purple': 50000,
        }
        try:
            for l in lights:
                l.on = True
                l.brightness = 254
                # this is necessary to reproduce colours accurately
                l.colortemp_k = 2000
                l.hue = colors[color]
            answer = "Turning {}lights {}...\U0001F4A1".format(mentioned_room, color)
        except Exception:
            answer = 'Something went wrong while trying to change the color of your lights...'
    # return final answer
    return answer
def wikipediaAction(message):
    """Makes the appropriate calls to the wikipedia API for answer wiki queries.

    Args:
        message (str): An incoming text message.

    Returns:
        A one-sentence summary plus URL for the requested topic, truncated to
        an SMS-friendly length, or an error message if no page was found.
    """
    message = sterilize(message)
    # tokenize input
    tokens = tokenize.wordpunct_tokenize(message)
    # filter stopwords, additionally, remove 'wiki' or 'wikipedia'
    tokens_filtered = remove_stopwords(tokens)
    tokens_filtered = [token for token in tokens_filtered
                       if token.lower() not in ('wiki', 'wikipedia')]
    # join filtered message
    message = ' '.join(tokens_filtered)
    # for debugging/testing
    print("(Highly) processed input: ", message)
    # Get the wikipedia summary for the request
    try:
        summary = wikipedia.summary(message, sentences=1)
        url = wikipedia.page(message).url
        answer = summary + "\nSee more here: " + url
        # keep the reply short enough for an SMS
        if len(answer) > 500:
            answer = answer[0:500] + "\nSee wikipedia for more..."
    except Exception:
        # handle all errors (disambiguation, missing page, network failure)
        answer = "Request was not found using Wikipedia. Be more specific?"
    return answer
def wolframAction(message=None):
    """Placeholder for answering queries via the Wolfram Alpha API.

    Accepts the incoming message because get_reply() calls this as
    ``wolframAction(message)`` — the previous zero-argument signature made
    that call raise TypeError. The default keeps the old no-argument call
    working too.

    Args:
        message (str, optional): An incoming text message.

    Returns:
        None, since the Wolfram integration is not implemented yet.
    """
    # TODO: implement the Wolfram Alpha lookup
    pass
def get_reply(message, nlp, apis):
    """
    This method processes an incoming SMS and makes calls to the appropriate
    APIs via other methods.

    Args:
        message (str): An incoming text message.
        nlp: Instance of Spacy NLP object.
        apis (dict): A dictionary containing any API objects we need to generate replies.

    Returns:
        A response to message either answering a request or indicating what actions were taken.
    """
    ## TEXT PREPROCESSING
    message = sterilize(message)
    print("(Simply) processed input: ", message)
    # Look for keyword triggers in the incoming SMS.
    # NOTE: re.IGNORECASE is used instead of mid-pattern inline '(?i)' flags,
    # which are an error on Python 3.11+ when not at the start of the pattern.
    ## WEATHER
    if re.search(r'\bweather\b', message, re.IGNORECASE):
        if apis['owm'] is not None:
            answer = weatherAction(message, nlp, apis['owm'])
        else:
            answer = "Hmm. It looks like I haven't been setup to answer weather requests. Take a look at the config.ini file!"
    ## WOLFRAM
    elif re.search(r'wolfram', message, re.IGNORECASE):
        if apis['wolfram'] is not None:
            answer = wolframAction(message)
        else:
            answer = "Hmm. It looks like I haven't been setup to answer Wolfram requests. Take a look at the config.ini file!"
    ## WIKI
    elif re.search(r'wiki(pedia)?', message, re.IGNORECASE):
        answer = wikipediaAction(message)
    # LIGHTS
    elif re.search(r'lamps?|lights?', message, re.IGNORECASE):
        if apis['philips_bridge'] is not None:
            answer = lightsAction(message, apis['philips_bridge'])
        else:
            answer = "Hmm. It looks like I haven't been setup to work with your Hue lights. Take a look at the config.ini file!"
    # the message contains no keywords. Display help prompt
    else:
        answer = ("\nStuck? Here are some things you can ask me:\n\n'wolfram' "
                  "{a question}\n'wiki' {wikipedia request}\n'weather' {place}\n'turn lights off'\n"
                  "\nNote: some of these features require additional setup.")
    # return the formulated answer
    return answer
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.gis.db import models
# trips model
class trips(models.Model):
level_0 = models.IntegerField(default=0, null=True, blank=True)
miscidx = models.IntegerField(default=0, null=True, blank=True)
index = models.IntegerField(default=0, null=True, blank=True)
tripid = models.BigIntegerField(default=0, null=True, blank=True)
tripdate = models.DateField(default=None, null=True, blank=True)
picktime = models.TimeField(default=None, null=True, blank=True)
droptime = models.TimeField(default=None, null=True, blank=True)
provider = models.CharField(max_length=255, null=True, blank=True)
status = models.CharField(max_length=255, null=True, blank=True)
routeid = models.IntegerField(default=0, null=True, blank=True)
pickhousenumber = models.CharField(max_length=255, null=True, blank=True)
pickaddress1 = models.CharField(max_length=255, null=True, blank=True)
pickcity = models.CharField(max_length=255, null=True, blank=True)
pickcounty = models.CharField(max_length=255, null=True, blank=True)
pickzip = models.CharField(max_length=255, null=True, blank=True)
drophousenumber = models.CharField(max_length=255, null=True, blank=True)
dropaddress1 = models.CharField(max_length=255, null=True, blank=True)
dropcity = models.CharField(max_length=255, null=True, blank=True)
dropcounty = models.CharField(max_length=255, null=True, blank=True)
dropzip = models.CharField(max_length=255, null=True, blank=True)
shared = models.BooleanField(default=False, blank=True)
puzip = models.CharField(max_length=255, null=True, blank=True)
dozip = models.CharField(max_length=255, null=True, blank=True)
uid = models.IntegerField(default=0, null=True, blank=True)
puid = models.CharField(max_length=255, null=True, blank=True)
duid = models.CharField(max_length=255, null=True, blank=True)
count = models.IntegerField(default=0, null=True, blank=True)
pickboro = models.IntegerField(default=0, null=True, blank=True)
dropboro = models.IntegerField(default=0, null=True, blank=True)
upast = models.CharField(max_length=255, null=True, blank=True)
udast = models.CharField(max_length=255, null=True, blank=True)
pickdate = models.DateField(default=None, null=True, blank=True)
dropdate = models.DateField(default=None, null=True, blank=True)
pickhour = models.IntegerField(default=0, null=True, blank=True)
pickmin = models.IntegerField(default=0, null=True, blank=True)
drophour = models.IntegerField(default=0, null=True, blank=True)
dropmin = models.IntegerField(default=0, null=True, blank=True)
pickdaymins = models.IntegerField(default=0, null=True, blank=True)
dropdaymins = models.IntegerField(default=0, null=True, blank=True)
tripminsdelta = models.IntegerField(default=0, null=True, blank=True)
p_bctcb2010 = models.BigIntegerField(default=0, null=True, blank=True)
p_lat = models.FloatField(default=0, null=True, blank=True)
p_lng = models.FloatField(default=0, null=True, blank=True)
p_count = models.IntegerField(default=0, null=True, blank=True)
p_val = models.BooleanField(default=False, blank=True)
d_bctcb2010 = models.BigIntegerField(default=0, null=True, blank=True)
d_lat = models.FloatField(default=0, null=True, blank=True)
d_lng = models.FloatField(default=0, null=True, blank=True)
d_count = models.IntegerField(default=0, null=True, blank=True)
d_val = models.BooleanField(default=False, blank=True)
p_geoid = models.CharField(max_length=255, null=True, blank=True)
p_xcoord = models.FloatField(default=0, null=True, blank=True)
p_ycoord = models.FloatField(default=0, null=True, blank=True)
d_geoid = models.CharField(max_length=255, null=True, blank=True)
d_xcoord = models.FloatField(default=0, null=True, blank=True)
d_ycoord = models.FloatField(default=0, null=True, blank=True)
p_geoid_bg = models.CharField(max_length=255, null=True, blank=True)
d_geoid_bg = models.CharField(max_length=255, null=True, blank=True)
p_geoid_tr = models.CharField(max_length=255, null=True, blank=True)
d_geoid_tr = models.CharField(max_length=255, null=True, blank=True)
geoid_pair = models.CharField(max_length=255, null=True, blank=True)
osrmminsdelta = models.FloatField(default=0, null=True, blank=True)
osrm_dist = models.FloatField(default=0, null=True, blank=True)
osrm_rval = models.BooleanField(default=False, blank=True)
p_nr_bus = models.FloatField(default=0, null=True, blank=True)
d_nr_bus = models.FloatField(default=0, null=True, blank=True)
p_nr_sub = models.FloatField(default=0, null=True, blank=True)
d_nr_sub = models.FloatField(default=0, null=True, blank=True)
p_nr_hea = models.FloatField(default=0, null=True, blank=True)
d_nr_hea = models.FloatField(default=0, null=True, blank=True)
p_p_count = models.IntegerField(default=0, null=True, blank=True)
p_d_count = models.IntegerField(default=0, null=True, blank=True)
p_a_count = models.IntegerField(default=0, null=True, blank=True)
p_p0010001 = models.IntegerField(default=0, null=True, blank=True)
p_p0030001 = models.IntegerField(default=0, null=True, blank=True)
p_p0030002 = models.IntegerField(default=0, null=True, blank=True)
p_p0030003 = models.IntegerField(default=0, null=True, blank=True)
p_p0030004 = models.IntegerField(default=0, null=True, blank=True)
p_p0030005 = models.IntegerField(default=0, null=True, blank=True)
p_p0030006 = models.IntegerField(default=0, null=True, blank=True)
p_p0030007 = models.IntegerField(default=0, null=True, blank=True)
p_p0030008 = models.IntegerField(default=0, null=True, blank=True)
p_p0040001 = models.IntegerField(default=0, null=True, blank=True)
p_p0040002 = models.IntegerField(default=0, null=True, blank=True)
p_p0040003 = models.IntegerField(default=0, null=True, blank=True)
p_p0120001 = models.IntegerField(default=0, null=True, blank=True)
p_p0120002 = models.IntegerField(default=0, null=True, blank=True)
p_p0120003 = models.IntegerField(default=0, null=True, blank=True)
p_p0120004 = models.IntegerField(default=0, null=True, blank=True)
p_p0120005 = models.IntegerField(default=0, null=True, blank=True)
p_p0120006 = models.IntegerField(default=0, null=True, blank=True)
p_p0120007 = models.IntegerField(default=0, null=True, blank=True)
p_p0120008 = models.IntegerField(default=0, null=True, blank=True)
p_p0120009 = models.IntegerField(default=0, null=True, blank=True)
p_p0120010 = models.IntegerField(default=0, null=True, blank=True)
p_p0120011 = models.IntegerField(default=0, null=True, blank=True)
p_p0120012 = models.IntegerField(default=0, null=True, blank=True)
p_p0120013 = models.IntegerField(default=0, null=True, blank=True)
p_p0120014 = models.IntegerField(default=0, null=True, blank=True)
p_p0120015 = models.IntegerField(default=0, null=True, blank=True)
p_p0120016 = models.IntegerField(default=0, null=True, blank=True)
p_p0120017 = models.IntegerField(default=0, null=True, blank=True)
p_p0120018 = models.IntegerField(default=0, null=True, blank=True)
p_p0120019 = models.IntegerField(default=0, null=True, blank=True)
p_p0120020 = models.IntegerField(default=0, null=True, blank=True)
p_p0120021 = models.IntegerField(default=0, null=True, blank=True)
p_p0120022 = models.IntegerField(default=0, null=True, blank=True)
p_p0120023 = models.IntegerField(default=0, null=True, blank=True)
p_p0120024 = models.IntegerField(default=0, null=True, blank=True)
p_p0120025 = models.IntegerField(default=0, null=True, blank=True)
p_p0120026 = models.IntegerField(default=0, null=True, blank=True)
p_p0120027 = models.IntegerField(default=0, null=True, blank=True)
p_p0120028 = models.IntegerField(default=0, null=True, blank=True)
p_p0120029 = models.IntegerField(default=0, null=True, blank=True)
p_p0120030 = models.IntegerField(default=0, null=True, blank=True)
p_p0120031 = models.IntegerField(default=0, null=True, blank=True)
p_p0120032 = models.IntegerField(default=0, null=True, blank=True)
p_p0120033 = models.IntegerField(default=0, null=True, blank=True)
p_p0120034 = models.IntegerField(default=0, null=True, blank=True)
p_p0120035 = models.IntegerField(default=0, null=True, blank=True)
p_p0120036 = models.IntegerField(default=0, null=True, blank=True)
p_p0120037 = models.IntegerField(default=0, null=True, blank=True)
p_p0120038 = models.IntegerField(default=0, null=True, blank=True)
p_p0120039 = models.IntegerField(default=0, null=True, blank=True)
p_p0120040 = models.IntegerField(default=0, null=True, blank=True)
p_p0120041 = models.IntegerField(default=0, null=True, blank=True)
p_p0120042 = models.IntegerField(default=0, null=True, blank=True)
p_p0120043 = models.IntegerField(default=0, null=True, blank=True)
p_p0120044 = models.IntegerField(default=0, null=True, blank=True)
p_p0120045 = models.IntegerField(default=0, null=True, blank=True)
p_p0120046 = models.IntegerField(default=0, null=True, blank=True)
p_p0120047 = models.IntegerField(default=0, null=True, blank=True)
p_p0120048 = models.IntegerField(default=0, null=True, blank=True)
p_p0120049 = models.IntegerField(default=0, null=True, blank=True)
p_h00010001 = models.IntegerField(default=0, null=True, blank=True)
p_h0030001 = models.IntegerField(default=0, null=True, blank=True)
p_h0030002 = models.IntegerField(default=0, null=True, blank=True)
p_h0030003 = models.IntegerField(default=0, null=True, blank=True)
p_h0040001 = models.IntegerField(default=0, null=True, blank=True)
p_h0040002 = models.IntegerField(default=0, null=True, blank=True)
p_h0040003 = models.IntegerField(default=0, null=True, blank=True)
p_h0040004 = models.IntegerField(default=0, null=True, blank=True)
p_h0050001 = models.IntegerField(default=0, null=True, blank=True)
p_h0050002 = models.IntegerField(default=0, null=True, blank=True)
p_h0050003 = models.IntegerField(default=0, null=True, blank=True)
p_h0050004 = models.IntegerField(default=0, null=True, blank=True)
p_h0050005 = models.IntegerField(default=0, null=True, blank=True)
p_h0050006 = models.IntegerField(default=0, null=True, blank=True)
p_h0050007 = models.IntegerField(default=0, null=True, blank=True)
p_h0050008 = models.IntegerField(default=0, null=True, blank=True)
p_p_pop = models.FloatField(default=0, null=True, blank=True)
p_d_pop = models.FloatField(default=0, null=True, blank=True)
p_a_pop = models.FloatField(default=0, null=True, blank=True)
d_p_count = models.IntegerField(default=0, null=True, blank=True)
d_d_count = models.IntegerField(default=0, null=True, blank=True)
d_a_count = models.IntegerField(default=0, null=True, blank=True)
d_p0010001 = models.IntegerField(default=0, null=True, blank=True)
d_p0030001 = models.IntegerField(default=0, null=True, blank=True)
d_p0030002 = models.IntegerField(default=0, null=True, blank=True)
d_p0030003 = models.IntegerField(default=0, null=True, blank=True)
d_p0030004 = models.IntegerField(default=0, null=True, blank=True)
d_p0030005 = models.IntegerField(default=0, null=True, blank=True)
d_p0030006 = models.IntegerField(default=0, null=True, blank=True)
d_p0030007 = models.IntegerField(default=0, null=True, blank=True)
d_p0030008 = models.IntegerField(default=0, null=True, blank=True)
d_p0040001 = models.IntegerField(default=0, null=True, blank=True)
d_p0040002 = models.IntegerField(default=0, null=True, blank=True)
d_p0040003 = models.IntegerField(default=0, null=True, blank=True)
d_p0120001 = models.IntegerField(default=0, null=True, blank=True)
d_p0120002 = models.IntegerField(default=0, null=True, blank=True)
d_p0120003 = models.IntegerField(default=0, null=True, blank=True)
d_p0120004 = models.IntegerField(default=0, null=True, blank=True)
d_p0120005 = models.IntegerField(default=0, null=True, blank=True)
d_p0120006 = models.IntegerField(default=0, null=True, blank=True)
d_p0120007 = models.IntegerField(default=0, null=True, blank=True)
d_p0120008 = models.IntegerField(default=0, null=True, blank=True)
d_p0120009 = models.IntegerField(default=0, null=True, blank=True)
d_p0120010 = models.IntegerField(default=0, null=True, blank=True)
d_p0120011 = models.IntegerField(default=0, null=True, blank=True)
d_p0120012 = models.IntegerField(default=0, null=True, blank=True)
d_p0120013 = models.IntegerField(default=0, null=True, blank=True)
d_p0120014 = models.IntegerField(default=0, null=True, blank=True)
d_p0120015 = models.IntegerField(default=0, null=True, blank=True)
d_p0120016 = models.IntegerField(default=0, null=True, blank=True)
d_p0120017 = models.IntegerField(default=0, null=True, blank=True)
d_p0120018 = models.IntegerField(default=0, null=True, blank=True)
d_p0120019 = models.IntegerField(default=0, null=True, blank=True)
d_p0120020 = models.IntegerField(default=0, null=True, blank=True)
d_p0120021 = models.IntegerField(default=0, null=True, blank=True)
d_p0120022 = models.IntegerField(default=0, null=True, blank=True)
d_p0120023 = models.IntegerField(default=0, null=True, blank=True)
d_p0120024 = models.IntegerField(default=0, null=True, blank=True)
d_p0120025 = models.IntegerField(default=0, null=True, blank=True)
d_p0120026 = models.IntegerField(default=0, null=True, blank=True)
d_p0120027 = models.IntegerField(default=0, null=True, blank=True)
d_p0120028 = models.IntegerField(default=0, null=True, blank=True)
d_p0120029 = models.IntegerField(default=0, null=True, blank=True)
d_p0120030 = models.IntegerField(default=0, null=True, blank=True)
d_p0120031 = models.IntegerField(default=0, null=True, blank=True)
d_p0120032 = models.IntegerField(default=0, null=True, blank=True)
d_p0120033 = models.IntegerField(default=0, null=True, blank=True)
d_p0120034 = models.IntegerField(default=0, null=True, blank=True)
d_p0120035 = models.IntegerField(default=0, null=True, blank=True)
d_p0120036 = models.IntegerField(default=0, null=True, blank=True)
d_p0120037 = models.IntegerField(default=0, null=True, blank=True)
d_p0120038 = models.IntegerField(default=0, null=True, blank=True)
d_p0120039 = models.IntegerField(default=0, null=True, blank=True)
d_p0120040 = models.IntegerField(default=0, null=True, blank=True)
d_p0120041 = models.IntegerField(default=0, null=True, blank=True)
d_p0120042 = models.IntegerField(default=0, null=True, blank=True)
d_p0120043 = models.IntegerField(default=0, null=True, blank=True)
d_p0120044 = models.IntegerField(default=0, null=True, blank=True)
d_p0120045 = models.IntegerField(default=0, null=True, blank=True)
d_p0120046 = models.IntegerField(default=0, null=True, blank=True)
d_p0120047 = models.IntegerField(default=0, null=True, blank=True)
d_p0120048 = models.IntegerField(default=0, null=True, blank=True)
d_p0120049 = models.IntegerField(default=0, null=True, blank=True)
d_h00010001 = models.IntegerField(default=0, null=True, blank=True)
d_h0030001 = models.IntegerField(default=0, null=True, blank=True)
d_h0030002 = models.IntegerField(default=0, null=True, blank=True)
d_h0030003 = models.IntegerField(default=0, null=True, blank=True)
d_h0040001 = models.IntegerField(default=0, null=True, blank=True)
d_h0040002 = models.IntegerField(default=0, null=True, blank=True)
d_h0040003 = models.IntegerField(default=0, null=True, blank=True)
d_h0040004 = models.IntegerField(default=0, null=True, blank=True)
d_h0050001 = models.IntegerField(default=0, null=True, blank=True)
d_h0050002 = models.IntegerField(default=0, null=True, blank=True)
d_h0050003 = models.IntegerField(default=0, null=True, blank=True)
d_h0050004 = models.IntegerField(default=0, null=True, blank=True)
d_h0050005 = models.IntegerField(default=0, null=True, blank=True)
d_h0050006 = models.IntegerField(default=0, null=True, blank=True)
d_h0050007 = models.IntegerField(default=0, null=True, blank=True)
d_h0050008 = models.IntegerField(default=0, null=True, blank=True)
d_p_pop = models.FloatField(default=0, null=True, blank=True)
d_d_pop = models.FloatField(default=0, null=True, blank=True)
d_a_pop = models.FloatField(default=0, null=True, blank=True)
p_b01001_001e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_002e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_003e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_004e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_005e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_006e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_007e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_008e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_009e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_010e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_011e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_012e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_013e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_014e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_015e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_016e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_017e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_018e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_019e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_020e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_021e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_022e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_023e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_024e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_025e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_026e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_027e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_028e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_029e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_030e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_031e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_032e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_033e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_034e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_035e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_036e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_037e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_038e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_039e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_040e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_041e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_042e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_043e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_044e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_045e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_046e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_047e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_048e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_049e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01003_001e_x = models.IntegerField(default=0, null=True, blank=True)
p_b19013_001e_x = models.IntegerField(default=0, null=True, blank=True)
p_b18101_001e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_001e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_002e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_003e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_004e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_005e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_006e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_007e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_008e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_009e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_010e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_011e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_012e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_013e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_014e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_015e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_016e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_017e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_018e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_019e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_020e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_021e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_022e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_023e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_024e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_025e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_026e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_027e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_028e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_029e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_030e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_031e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_032e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_033e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_034e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_035e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_036e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_037e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_038e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_039e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_040e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_041e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_042e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_043e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_044e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_045e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_046e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_047e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_048e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01001_049e_x = models.IntegerField(default=0, null=True, blank=True)
d_b01003_001e_x = models.IntegerField(default=0, null=True, blank=True)
d_b19013_001e_x = models.IntegerField(default=0, null=True, blank=True)
d_b18101_001e_x = models.IntegerField(default=0, null=True, blank=True)
p_b01001_001e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_002e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_003e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_004e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_005e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_006e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_007e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_008e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_009e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_010e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_011e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_012e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_013e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_014e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_015e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_016e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_017e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_018e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_019e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_020e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_021e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_022e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_023e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_024e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_025e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_026e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_027e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_028e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_029e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_030e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_031e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_032e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_033e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_034e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_035e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_036e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_037e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_038e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_039e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_040e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_041e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_042e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_043e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_044e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_045e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_046e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_047e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_048e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01001_049e_y = models.IntegerField(default=0, null=True, blank=True)
p_b01003_001e_y = models.IntegerField(default=0, null=True, blank=True)
p_b19013_001e_y = models.IntegerField(default=0, null=True, blank=True)
p_b18101_001e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_001e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_002e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_003e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_004e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_005e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_006e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_007e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_008e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_009e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_010e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_011e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_012e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_013e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_014e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_015e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_016e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_017e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_018e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_019e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_020e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_021e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_022e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_023e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_024e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_025e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_026e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_027e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_028e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_029e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_030e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_031e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_032e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_033e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_034e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_035e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_036e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_037e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_038e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_039e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_040e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_041e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_042e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_043e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_044e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_045e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_046e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_047e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_048e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01001_049e_y = models.IntegerField(default=0, null=True, blank=True)
d_b01003_001e_y = models.IntegerField(default=0, null=True, blank=True)
d_b19013_001e_y = models.IntegerField(default=0, null=True, blank=True)
d_b18101_001e_y = models.IntegerField(default=0, null=True, blank=True)
# pickups model
class pickup_locations(models.Model):
    """Pickup location of a single trip (one row per trip)."""
    # Owning trip; deleting the trip cascades to this location.
    trip = models.OneToOneField(trips, on_delete=models.CASCADE)
    # NOTE(review): duplicates the trip's numeric id alongside the FK --
    # presumably kept for bulk loads that bypass the relation; confirm.
    tripid = models.BigIntegerField(default=0, null=True, blank=True)
    p_lat = models.FloatField(default=0, null=True, blank=True)  # pickup latitude
    p_lng = models.FloatField(default=0, null=True, blank=True)  # pickup longitude
    the_geom = models.PointField(null=True, blank=True)  # GeoDjango point geometry
# dropoffs model
class dropoff_locations(models.Model):
    """Dropoff location of a single trip (one row per trip)."""
    # Owning trip; deleting the trip cascades to this location.
    trip = models.OneToOneField(trips, on_delete=models.CASCADE)
    # NOTE(review): duplicates the trip's numeric id alongside the FK --
    # presumably kept for bulk loads that bypass the relation; confirm.
    tripid = models.BigIntegerField(default=0, null=True, blank=True)
    d_lat = models.FloatField(default=0, null=True, blank=True)  # dropoff latitude
    d_lng = models.FloatField(default=0, null=True, blank=True)  # dropoff longitude
    the_geom = models.PointField(null=True, blank=True)  # GeoDjango point geometry
# ADA subway stations model (point layer, loaded via LayerMapping)
class ada_subway_stations(models.Model):
    """NYC subway stations with ADA access, imported from a CartoDB export."""
    cartodb_id = models.IntegerField()  # source row id from CartoDB
    lat = models.FloatField()
    lon = models.FloatField()
    objectid = models.IntegerField()
    stop_id = models.CharField(max_length=80)
    stop_name = models.CharField(max_length=80)
    stop_lat = models.FloatField()
    stop_lon = models.FloatField()
    geoid = models.IntegerField()
    namelsad = models.CharField(max_length=80)
    stop_id2 = models.CharField(max_length=80)
    trains = models.CharField(max_length=80)  # presumably routes serving the stop -- confirm
    geom = models.PointField(srid=4326)  # WGS84 point
# Auto-generated `LayerMapping` dictionary for ada_subway_stations model:
# model field name -> source layer attribute ('geom' maps to the POINT geometry).
ada_subway_stations_mapping = {
    'cartodb_id' : 'cartodb_id',
    'lat' : 'lat',
    'lon' : 'lon',
    'objectid' : 'objectid',
    'stop_id' : 'stop_id',
    'stop_name' : 'stop_name',
    'stop_lat' : 'stop_lat',
    'stop_lon' : 'stop_lon',
    'geoid' : 'geoid',
    'namelsad' : 'namelsad',
    'stop_id2' : 'stop_id2',
    'trains' : 'trains',
    'geom' : 'POINT',
}
# all subway stations model (point layer, loaded via LayerMapping)
class nyct_subway_stops(models.Model):
    """All NYCT subway stops (ADA and non-ADA), imported from a CartoDB export."""
    cartodb_id = models.IntegerField()  # source row id from CartoDB
    objectid = models.IntegerField()
    stop_id = models.CharField(max_length=80)
    stop_name = models.CharField(max_length=80)
    stop_lat = models.FloatField()
    stop_lon = models.FloatField()
    name_cur = models.CharField(max_length=80)
    routes_wkd = models.CharField(max_length=80)  # presumably weekday routes -- confirm
    routes_off = models.CharField(max_length=80)  # presumably off-peak routes -- confirm
    routes_all = models.CharField(max_length=80)
    name_prop = models.CharField(max_length=80)
    route_lbl = models.CharField(max_length=80)
    ada_access = models.IntegerField()  # ADA accessibility flag/code
    geom = models.PointField(srid=4326)  # WGS84 point
# Auto-generated `LayerMapping` dictionary for nyct_subway_stops model:
# model field name -> source layer attribute ('geom' maps to the POINT geometry).
nyct_subway_stops_mapping = {
    'cartodb_id' : 'cartodb_id',
    'objectid' : 'objectid',
    'stop_id' : 'stop_id',
    'stop_name' : 'stop_name',
    'stop_lat' : 'stop_lat',
    'stop_lon' : 'stop_lon',
    'name_cur' : 'name_cur',
    'routes_wkd' : 'routes_wkd',
    'routes_off' : 'routes_off',
    'routes_all' : 'routes_all',
    'name_prop' : 'name_prop',
    'route_lbl' : 'route_lbl',
    'ada_access' : 'ada_access',
    'geom' : 'POINT',
}
# NYC ODP census tracts model (polygon layer, loaded via LayerMapping)
class nyc_odp_census_tracts(models.Model):
    """2010 census tracts from the NYC Open Data Portal."""
    ntacode = models.CharField(max_length=254)    # neighborhood tabulation area code
    ctlabel = models.CharField(max_length=254)    # census tract label
    cdeligibil = models.CharField(max_length=254)
    shape_leng = models.FloatField()
    ntaname = models.CharField(max_length=254)
    boro_name = models.CharField(max_length=254)
    boro_ct201 = models.CharField(max_length=254)
    shape_area = models.FloatField()
    boro_code = models.CharField(max_length=254)
    ct2010 = models.CharField(max_length=254)
    puma = models.CharField(max_length=254)
    geom = models.MultiPolygonField(srid=4326)    # WGS84 multipolygon
# Auto-generated `LayerMapping` dictionary for nyc_odp_census_tracts model:
# model field name -> source layer attribute ('geom' maps to the MULTIPOLYGON geometry).
nyc_odp_census_tracts_mapping = {
    'ntacode' : 'ntacode',
    'ctlabel' : 'ctlabel',
    'cdeligibil' : 'cdeligibil',
    'shape_leng' : 'shape_leng',
    'ntaname' : 'ntaname',
    'boro_name' : 'boro_name',
    'boro_ct201' : 'boro_ct201',
    'shape_area' : 'shape_area',
    'boro_code' : 'boro_code',
    'ct2010' : 'ct2010',
    'puma' : 'puma',
    'geom' : 'MULTIPOLYGON',
}
# NYC ODP borough boundary model (polygon layer, loaded via LayerMapping)
class nyc_odp_borough_boundaries(models.Model):
    """Borough boundaries from the NYC Open Data Portal."""
    shape_leng = models.FloatField()
    boro_name = models.CharField(max_length=254)
    boro_code = models.FloatField()
    shape_area = models.FloatField()
    geom = models.MultiPolygonField(srid=4326)  # WGS84 multipolygon
# Auto-generated `LayerMapping` dictionary for nyc_odp_borough_boundaries model:
# model field name -> source layer attribute ('geom' maps to the MULTIPOLYGON geometry).
nyc_odp_borough_boundaries_mapping = {
    'shape_leng' : 'shape_leng',
    'boro_name' : 'boro_name',
    'boro_code' : 'boro_code',
    'shape_area' : 'shape_area',
    'geom' : 'MULTIPOLYGON',
}
# NYC ODP neighborhood tabulation areas model (polygon layer, loaded via LayerMapping)
class nyc_odp_ntas(models.Model):
    """Neighborhood Tabulation Areas (NTAs) from the NYC Open Data Portal."""
    county_fip = models.CharField(max_length=254)  # county FIPS code
    shape_area = models.FloatField()
    shape_leng = models.FloatField()
    ntacode = models.CharField(max_length=254)
    boro_code = models.FloatField()
    ntaname = models.CharField(max_length=254)
    boro_name = models.CharField(max_length=254)
    geom = models.MultiPolygonField(srid=4326)     # WGS84 multipolygon
# Auto-generated `LayerMapping` dictionary for nyc_odp_ntas model:
# model field name -> source layer attribute ('geom' maps to the MULTIPOLYGON geometry).
nyc_odp_ntas_mapping = {
    'county_fip' : 'county_fip',
    'shape_area' : 'shape_area',
    'shape_leng' : 'shape_leng',
    'ntacode' : 'ntacode',
    'boro_code' : 'boro_code',
    'ntaname' : 'ntaname',
    'boro_name' : 'boro_name',
    'geom' : 'MULTIPOLYGON',
}
# NYC ODP zip code tabulation areas model (polygon layer, loaded via LayerMapping)
class nyc_odp_zctas_remove_building_zips(models.Model):
    """ZIP code tabulation areas with building-only ZIPs removed."""
    zipcode = models.CharField(max_length=5)
    bldgzip = models.CharField(max_length=1)   # flag: ZIP assigned to a single building
    po_name = models.CharField(max_length=28)  # post office name
    population = models.FloatField()
    area = models.FloatField()
    state = models.CharField(max_length=2)
    county = models.CharField(max_length=20)
    st_fips = models.CharField(max_length=2)   # state FIPS code
    cty_fips = models.CharField(max_length=3)  # county FIPS code
    url = models.CharField(max_length=200)
    shape_area = models.FloatField()
    shape_len = models.FloatField()
    geom = models.MultiPolygonField(srid=4326)  # WGS84 multipolygon
# Auto-generated `LayerMapping` dictionary for nyc_odp_zctas_remove_building_zips model:
# model field name -> source layer attribute (source attributes are upper-case here;
# 'geom' maps to the MULTIPOLYGON geometry).
nyc_odp_zctas_remove_building_zips_mapping = {
    'zipcode' : 'ZIPCODE',
    'bldgzip' : 'BLDGZIP',
    'po_name' : 'PO_NAME',
    'population' : 'POPULATION',
    'area' : 'AREA',
    'state' : 'STATE',
    'county' : 'COUNTY',
    'st_fips' : 'ST_FIPS',
    'cty_fips' : 'CTY_FIPS',
    'url' : 'URL',
    'shape_area' : 'SHAPE_AREA',
    'shape_len' : 'SHAPE_LEN',
    'geom' : 'MULTIPOLYGON',
}
# Rudin Center / NYC ODP medical centers (point layer, loaded via LayerMapping)
class medical_centers(models.Model):
    """Medical center locations (Rudin Center / NYC Open Data Portal)."""
    borough = models.CharField(max_length=254)
    name = models.CharField(max_length=254)
    address = models.CharField(max_length=254)
    latitude = models.FloatField()
    longitude = models.FloatField()
    geom = models.PointField(srid=4326)  # WGS84 point
# Auto-generated `LayerMapping` dictionary for medical_centers model:
# model field name -> source layer attribute ('geom' maps to the POINT geometry).
medical_centers_mapping = {
    'borough' : 'borough',
    'name' : 'name',
    'address' : 'address',
    'latitude' : 'latitude',
    'longitude' : 'longitude',
    'geom' : 'POINT',
}
class trips_pickup_dropoff_tracts(models.Model):
    """Census-tract assignment of a trip's pickup and dropoff points."""
    tripid = models.BigIntegerField(default=0, null=True, blank=True)
    pickup_tract = models.CharField(max_length=254, null=True, blank=True)
    dropoff_tract = models.CharField(max_length=254, null=True, blank=True)
| |
from __future__ import absolute_import
from __future__ import print_function
import sys
from copy import copy, deepcopy
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 1000)
from signals import *
def find_all_signals(_df, comission=0.0, max_position_size=1, debug=False):
    """
    Find and return all signals that could result in profitable deals, taking
    commission into account.

    E.g. it will return a Buy and a Sell signal if the ask price at the Buy is
    lower than the bid price at the Sell minus the commission.  It then moves
    one step forward and considers the already-seen Sell signal and the next
    Buy for a possible profitable short deal.

    Parameters
    ----------
    _df : DataFrame with 'askpx_' and 'bidpx_' columns; copied, not mutated.
    comission : float, round-trip commission a deal must beat.
    max_position_size, debug : currently unused; kept for interface stability.

    Returns a copy of `_df` with final 'Buy'/'Sell' signal columns, deal
    counters 'Buy Mod'/'Sell Mod' and the raw candidate columns.
    """
    df = deepcopy(_df)
    df['Buy'] = np.zeros(df.shape[0])
    df['Sell'] = np.zeros(df.shape[0])
    df['Buy Mod'] = np.zeros(df.shape[0])
    df['Sell Mod'] = np.zeros(df.shape[0])
    # Local extrema: the ask rises after a Buy point, the bid falls after a Sell point.
    inflection_points = pd.DataFrame({'Buy': df["askpx_"].diff().shift(-1) > 0,
                                      'Sell': df["bidpx_"].diff().shift(-1) < 0})
    iterator = inflection_points.iterrows()
    max_count = 0
    position_size = 0
    try:
        while True:
            # next_signal() returns None when the data is exhausted; unpacking
            # None raises TypeError, which deliberately ends this scan.
            idx_open, next_idx, row_open, sig_type_open = next_signal(iterator, df)
            iterator = inflection_points.loc[next_idx:].iterrows()
            next(iterator)  # builtin next(): works on Py2.6+ and Py3 (was Py2-only .next())
            df.loc[idx_open, sig_type_open] = 1  # .loc write instead of chained indexing
    except TypeError:
        print("Iteration stopped")
    print("Buy candidates: {} Sell candidates: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
    # Walk consecutive candidates and keep only the profitable open/close pairs.
    candidates = df[(df['Buy'] != 0) | (df['Sell'] != 0)].iterrows()
    idx_open, row_open = next(candidates)
    for idx, row in candidates:
        if row_open['Buy'] == 1 and (df["bidpx_"][idx] > (df["askpx_"][idx_open] + comission)):
            df.loc[idx_open, 'Buy Mod'] += 1
            df.loc[idx, 'Sell Mod'] += 1
        elif row_open['Sell'] == 1 and (df["askpx_"][idx] < (df["bidpx_"][idx_open] - comission)):
            df.loc[idx_open, 'Sell Mod'] += 1
            df.loc[idx, 'Buy Mod'] += 1
        idx_open = idx
        row_open = row
    # Keep the original (misspelled) label "Sell Candidtates": downstream
    # consumers may already reference it by that exact name.
    df = df.rename(columns={"Buy": "Buy Candidates", "Sell": "Sell Candidtates"})
    df['Buy'] = np.zeros(df.shape[0])
    df['Sell'] = np.zeros(df.shape[0])
    df.loc[df['Buy Mod'] != 0, 'Buy'] = 1
    df.loc[df['Sell Mod'] != 0, 'Sell'] = 1
    print("Buy: {} Sell: {}".format(df[df['Buy Mod'] != 0].count()['Buy Mod'], df[df['Sell Mod'] != 0].count()['Sell Mod']))
    print("Buy: {} Sell: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
    return df
def next_signal(iterator, df=None, sig_type=None, outer_idx=None, outer_row=None):
    """
    Recursive function to find the best signal (Buy or Sell) among a sequence
    of possible candidates (inflection points).

    It compares the current candidate with the next ones; if a later candidate
    of the same type is better (e.g. the current candidate is a Buy with ask 20
    and a later Buy has ask 10), the better one is remembered, and the best
    candidate seen so far is returned as soon as the first opposite-type
    candidate appears.

    Returns (best_idx, prev_idx, best_row, sig_type), or falls off the loop
    and returns None when the iterator is exhausted -- the caller relies on
    the TypeError raised by unpacking None to stop its scan.
    """
    prev_idx = outer_idx  # index of the last same-type candidate seen
    best_idx = outer_idx  # index of the best candidate so far
    best_row = outer_row
    for idx, row in iterator:
        # print(idx, row)
        if row['Buy'] or row['Sell']:
            inner_sig_type = 'Buy' if row['Buy'] else 'Sell'
            print("Inner signal: ", idx, inner_sig_type)
            if sig_type:
                print("Outer signal: ", outer_idx, sig_type)
                if inner_sig_type == sig_type:
                    print("Compare {} bid: {} ask: {} with {} bid: {} ask: {}".
                        format(best_idx, df["bidpx_"][best_idx], df["askpx_"][best_idx], idx, df["bidpx_"][idx], df["askpx_"][idx]))
                    # A Buy candidate is better when it opens at a lower ask.
                    if sig_type == 'Buy' and df["askpx_"][idx] < df["askpx_"][best_idx]:
                        print("Better {} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
                        best_idx, best_row = idx, row
                        #return idx, idx, row, sig_type
                    # A Sell candidate is better when it opens at a higher bid.
                    if sig_type == 'Sell' and df["bidpx_"][idx] > df["bidpx_"][best_idx]:
                        print("Better {} candidate at {} with price {}".format(sig_type, idx, df["bidpx_"][idx]))
                        best_idx, best_row = idx, row
                        #return idx, idx, row, sig_type
                    prev_idx = idx
                else:
                    # First opposite-type candidate: the search is settled.
                    print("Best {} candidate at {}, break...".format(sig_type, outer_idx))
                    return best_idx, prev_idx, best_row, sig_type
            else:
                # No outer signal yet: recurse with this candidate as the outer one.
                print("Recursion")
                return next_signal(iterator, df, inner_sig_type, idx, row)
def set_positions(_df):
    """
    Accumulate the running position from the 'Buy Mod'/'Sell Mod' deal
    counters into a new 'Pos' column and count long/short position entries.

    Returns a copy of `_df`; the input frame is not mutated.
    """
    df = deepcopy(_df)
    df['Pos'] = np.zeros(df.shape[0])
    last_position = 0
    longs = 0
    shorts = 0
    iterator = df.iterrows()
    # First row keeps Pos == 0; builtin next() replaces the Py2-only .next().
    last_idx, last_row = next(iterator)
    for idx, row in iterator:
        # Fixes two bugs in the original:
        #  * df.loc[idx]['Pos'] = ... wrote to a temporary copy and was lost;
        #  * last_row['Pos'] was a snapshot taken before 'Pos' was written,
        #    so the position never accumulated.
        # Write with .loc and read the accumulated value back from the frame.
        df.loc[idx, 'Pos'] = row['Buy Mod'] - row['Sell Mod'] + df.loc[last_idx, 'Pos']
        last_idx, last_row = idx, row
        if df.loc[idx, 'Pos'] != last_position and df.loc[idx, 'Pos'] > 0:
            longs += 1
        elif df.loc[idx, 'Pos'] != last_position and df.loc[idx, 'Pos'] < 0:
            shorts += 1
        last_position = df.loc[idx, 'Pos']
    print("Long positions: {} Short positions: {}".format(longs, shorts))
    return df
def find_signals(df, sig_type, comission=0.0, debug=False):
    """
    Find open/close signal pairs of one direction ('Buy' or 'Sell').

    Walks the inflection points of the relevant price series; for each open
    candidate it looks ahead until it either finds a better open (abandon the
    candidate) or a closing price that beats the open by more than
    `comission` (record the pair).

    Mutates `df` in place (adds e.g. 'Buy' and 'Sell Close' columns) and
    returns it.
    """
    colnames = {"Buy": ("Buy", "Sell Close"),
                "Sell": ("Sell", "Buy Close")}
    inflection_points_buy = df["askpx_"].diff().shift(-1) > 0
    inflection_points_sell = df["bidpx_"].diff().shift(-1) < 0
    # Series.items() replaces iteritems(), which was removed in pandas 2.0.
    iterator = inflection_points_buy.items() if sig_type == "Buy" else inflection_points_sell.items()
    inflection_points = inflection_points_buy if sig_type == "Buy" else inflection_points_sell
    inner_inflection_points = inflection_points_sell if sig_type == "Buy" else inflection_points_buy
    max_count = 0
    (major_colname, minor_colname) = colnames[sig_type]
    df[major_colname] = np.zeros(df.shape[0])
    df[minor_colname] = np.zeros(df.shape[0])
    for idx, val in iterator:
        if max_count > 10000 and debug:
            print("Max count reached, break...")
            break
        inner_iterator = inner_inflection_points.loc[idx:].items()
        # A new position may only be opened after the last recorded close.
        if df[df[minor_colname] == 1].empty:
            can_open = True
        else:
            can_open = idx > df[df[minor_colname] == 1].index[-1]
        max_count += 1
        if val and can_open:
            print("{} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
            for inner_idx, inner_val in inner_iterator:
                if inner_idx > idx:
                    if sig_type == "Buy":
                        # A later Buy inflection with a lower ask beats this candidate.
                        if df["askpx_"][inner_idx] < df["askpx_"][idx] and inflection_points[inner_idx]:
                            print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx, df["askpx_"][inner_idx]))
                            break
                        if df["bidpx_"][inner_idx] > (df["askpx_"][idx] + comission) and inner_val:
                            df.loc[idx, major_colname] = 1        # .loc write instead of chained indexing
                            df.loc[inner_idx, minor_colname] = 1
                            print("Buy at {} with price {}".format(idx, df["askpx_"][idx]))
                            print("Sell at {} with price {}".format(inner_idx, df["bidpx_"][inner_idx]))
                            break
                    elif sig_type == "Sell":
                        # A later Sell inflection with a higher bid beats this candidate.
                        if df["bidpx_"][inner_idx] > df["bidpx_"][idx] and inflection_points[inner_idx]:
                            print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx, df["bidpx_"][inner_idx]))
                            break
                        if df["askpx_"][inner_idx] < (df["bidpx_"][idx] - comission) and inner_val:
                            df.loc[idx, major_colname] = 1
                            df.loc[inner_idx, minor_colname] = 1
                            print("Sell at {} with price {}".format(idx, df["bidpx_"][idx]))
                            print("Buy at {} with price {}".format(inner_idx, df["askpx_"][inner_idx]))
                            break
    return df
def filter_signals(df):
    """
    Combine candidate columns into final 'Buy'/'Sell' signals.

    A final signal requires agreement between the open pass and the close
    pass (a row marked in both 'Buy' and 'Buy Close', likewise for Sell).
    Consecutive identical signals are then broken up by inserting the best
    opposite-side close between them.  The raw candidates are preserved as
    'Buy Open'/'Sell Open'.  Mutates `df` and returns the renamed frame.
    """
    buys = df["Buy"] + df["Buy Close"]
    df["Buy Mod"] = np.zeros(df.shape[0])
    df.loc[buys == 2, "Buy Mod"] = 1      # .loc write instead of chained indexing
    sells = df["Sell"] + df["Sell Close"]
    df["Sell Mod"] = np.zeros(df.shape[0])
    df.loc[sells == 2, "Sell Mod"] = 1
    iterator = df.iterrows()
    current_signal = 0
    for idx, row in iterator:
        current_signal = row["Buy Mod"] - row["Sell Mod"]
        if current_signal != 0:
            print("Signal {} at {}".format(current_signal, idx))
            inner_iterator = df.loc[idx:].iterrows()
            next(inner_iterator)  # skip the current row; builtin next() works on Py2.6+/Py3
            for inner_idx, inner_row in inner_iterator:
                # Renamed from `next_signal`, which shadowed the module-level
                # function of the same name.
                upcoming_signal = inner_row["Buy Mod"] - inner_row["Sell Mod"]
                if upcoming_signal == current_signal:
                    print("Consecutive similar signal {} at {}".format(upcoming_signal, inner_idx))
                    if current_signal == 1:
                        # Two opens in a row: close at the best Sell candidate between them.
                        df_slice = df.loc[idx:inner_idx]
                        candidates = df_slice[df_slice["Sell"] == 1]
                        best_candidate = candidates["bidpx_"].idxmax()
                        print(df.loc[best_candidate])
                        df.loc[best_candidate, "Sell Mod"] = 1
                        break
                    elif current_signal == -1:
                        # Two shorts in a row: cover at the best Buy candidate between them.
                        df_slice = df.loc[idx:inner_idx]
                        candidates = df_slice[df_slice["Buy"] == 1]
                        best_candidate = candidates["askpx_"].idxmin()
                        print(df.loc[best_candidate])
                        df.loc[best_candidate, "Buy Mod"] = 1
                        break
                elif upcoming_signal != 0 and upcoming_signal != current_signal:
                    break
    df["Buy Open"] = df["Buy"]
    df["Sell Open"] = df["Sell"]
    df = df.drop(["Buy", "Sell"], axis=1)
    print(df.columns)
    df = df.rename(columns={"Buy Mod": "Buy", "Sell Mod": "Sell"})
    print(df.columns)
    # df = df.drop(["Buy Close", "Sell Close"], axis=1)
    return df
def make_spans(df, sig_type):
    """
    Mark the run of rows with the same price leading up to each signal.

    For every row where df[sig_type] == 1, walks backwards while the relevant
    price ('askpx_' for Buy, 'bidpx_' for Sell) is unchanged and flags those
    rows in a new 'Buys'/'Sells' column.  Mutates `df` and returns it.
    """
    span_colname = "Buys" if sig_type == "Buy" else "Sells"
    reversed_df = df[::-1]
    df[span_colname] = np.zeros(df.shape[0])
    for idx in df[sig_type][df[sig_type] == 1].index:
        signal_val = df.loc[idx]
        iterator = reversed_df.loc[idx:].iterrows()  # walk backwards from the signal row
        # Plain if/else replaces the original `_d = print(...) if ... else print(...)`.
        if sig_type == "Buy":
            print("Outer loop:", idx, signal_val["askpx_"])
        else:
            print("Outer loop:", idx, signal_val["bidpx_"])
        for i, val in iterator:
            # Extend the span only while the price equals the signal price.
            if sig_type == "Buy":
                if val["askpx_"] == signal_val["askpx_"]:
                    df.loc[i, span_colname] = 1  # .loc write instead of chained indexing
                else:
                    break
            elif sig_type == "Sell":
                if val["bidpx_"] == signal_val["bidpx_"]:
                    df.loc[i, span_colname] = 1
                else:
                    break
    return df
def pnl(df, chained=False):
    """
    Compute the profit and loss of the signals in `df`.

    With chained=False, sums Sell-at-bid minus Buy-at-ask over every row with
    a nonzero 'Buy Mod'/'Sell Mod'.  With chained=True, replays 'Buy'/'Sell'
    signals as a chain of reversing deals (a signal while a position is open
    both closes it and opens the opposite one) and drops the final, never
    closed opening deal.

    Returns (pnl, number_of_single_deals).
    """
    deals = []
    total = 0  # renamed from `pnl`, which shadowed this function's own name
    if not chained:
        for idx, row in df[(df['Buy Mod'] != 0) | (df['Sell Mod'] != 0)].iterrows():
            current_trade = row['Sell Mod'] * row["bidpx_"] - row['Buy Mod'] * row["askpx_"]
            total = total + current_trade
            deals.append(current_trade)
            print("Running PnL: ", total)
        print("Check PnL: {} vs {}".format(total, np.sum(deals)))
        return total, len(deals)
    else:
        is_opened = False
        for idx, row in df.iterrows():
            if row["Buy"]:
                if is_opened:
                    deals.append(-row["askpx_"])  # close the open short ...
                deals.append(-row["askpx_"])      # ... and open a long
                is_opened = True
            elif row["Sell"]:
                if is_opened:
                    deals.append(row["bidpx_"])   # close the open long ...
                deals.append(row["bidpx_"])       # ... and open a short
                is_opened = True
        print(len(deals))
        # Guard: the original popped unconditionally and raised IndexError on
        # a frame with no signals at all.
        if deals:
            deals.pop()  # drop the final opening deal, which is never closed
        print(len(deals))
        return np.sum(deals), len(deals)
def __main__():
    """
    Trading Simulator from curriculumvite trading competition
    see also the arvix Paper from Roni Mittelman http://arxiv.org/pdf/1508.00317v1
    Modified by Ernst.Tmp@gmx.at
    produces data to train a neural net

    Command line: day_trading_file [--spans] [--chained-deals]
    (the flags are positional: argv[2] must be --spans, argv[3] --chained-deals).
    """
    # Trades smaller than this will be omitted
    min_trade_amount = None
    comission = 0.0
    if len(sys.argv) < 2 :
        print ("Usage: day_trading_file, NOT target_price-file ")
        sys.exit()
    day_file = sys.argv[1]
    # Optional flags; absence simply means False.
    try:
        write_spans = True if sys.argv[2] == "--spans" else False
    except IndexError:
        write_spans = False
    try:
        chained_deals = True if sys.argv[3] == "--chained-deals" else False
    except IndexError:
        chained_deals = False
    # NOTE(review): generate_signals_for_file is not defined in this module --
    # presumably provided by `from signals import *` above; confirm.
    generate_signals_for_file(day_file, comission, write_spans, chained_deals)
# Runs immediately on import/execution: there is no `if __name__ == "__main__"` guard.
__main__();
| |
from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(TestCase):
    """Tests for django.middleware.security.SecurityMiddleware.

    Helper methods feed a synthetic request (optionally https) through the
    middleware's request and/or response phases; each test overrides the
    relevant SECURE_* setting and inspects the resulting header or redirect.
    """
    # Shared factory used by the helpers below to build requests.
    request = RequestFactory()

    @property
    def middleware(self):
        # Instantiated lazily so each test's override_settings is in effect.
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        # Extra WSGI environ entry that makes request.is_secure() return True.
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        """Build an HttpResponse, applying an optional `headers` dict to it."""
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        """Run a full request/response cycle through the middleware.

        Honors `secure=True` and an optional `request=` override.  If the
        request phase short-circuits (e.g. an SSL redirect), that response
        is returned instead of running the response phase.
        """
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    def process_request(self, method, *args, **kwargs):
        """Run only the request phase of the middleware."""
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    # Fixed: was override_settings(HSTS_SECONDS=3600) -- missing the SECURE_
    # prefix used by the middleware and every sibling test, so the setting
    # was ignored and the test passed vacuously.
    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=False))

    # Fixed: was override_settings(HSTS_SECONDS=0); see note above.
    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        True, the middleware adds a "strict-transport-security" header with the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        False, the middleware adds a "strict-transport-security" header without
        the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")

    # Fixed: was SECURE_CONTENT_TYPE_NO_SNIFF (typo) -- a setting name that
    # does not exist, so the override had no effect.
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With BROWSER_XSS_FILTER set to True, the middleware adds
        "x-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    # Fixed: was override_settings(BROWSER_XSS_FILTER=False) -- missing the
    # SECURE_ prefix used everywhere else.
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With BROWSER_XSS_FILTER set to False, the middleware does not add an
        "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
| |
# dagparser.py - parser and generator for concise description of DAGs
#
# Copyright 2010 Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import re, string
import util
from i18n import _
def parsedag(desc):
    '''parses a DAG from a concise textual description; generates events
    "+n" is a linear run of n nodes based on the current default parent
    "." is a single node based on the current default parent
    "$" resets the default parent to -1 (implied at the start);
    otherwise the default parent is always the last node created
    "<p" sets the default parent to the backref p
    "*p" is a fork at parent p, where p is a backref
    "*p1/p2/.../pn" is a merge of parents p1..pn, where the pi are backrefs
    "/p2/.../pn" is a merge of the preceding node and p2..pn
    ":name" defines a label for the preceding node; labels can be redefined
    "@text" emits an annotation event for text
    "!command" emits an action event for the current node
    "!!my command\n" is like "!", but to the end of the line
    "#...\n" is a comment up to the end of the line
    Whitespace between the above elements is ignored.
    A backref is either
    * a number n, which references the node curr-n, where curr is the current
    node, or
    * the name of a label you placed earlier using ":name", or
    * empty to denote the default parent.
    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\" as escape character.
    Generates sequence of
    ('n', (id, [parentids])) for node creation
    ('l', (id, labelname)) for labels on nodes
    ('a', text) for annotations
    ('c', command) for actions (!)
    ('C', command) for line actions (!!)
    Examples
    --------
    Example of a complex graph (output not shown for brevity):
    >>> len(list(parsedag("""
    ...
    ... +3 # 3 nodes in linear run
    ... :forkhere # a label for the last of the 3 nodes from above
    ... +5 # 5 more nodes on one branch
    ... :mergethis # label again
    ... <forkhere # set default parent to labeled fork node
    ... +10 # 10 more nodes on a parallel branch
    ... @stable # following nodes will be annotated as "stable"
    ... +5 # 5 nodes in stable
    ... !addfile # custom command; could trigger new file in next node
    ... +2 # two more nodes
    ... /mergethis # merge last node with labeled node
    ... +4 # 4 more nodes descending from merge node
    ...
    ... """)))
    34
    Empty list:
    >>> list(parsedag(""))
    []
    A simple linear run:
    >>> list(parsedag("+3"))
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
    Some non-standard ways to define such runs:
    >>> list(parsedag("+1+2"))
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
    >>> list(parsedag("+1*1*"))
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
    >>> list(parsedag("*"))
    [('n', (0, [-1]))]
    >>> list(parsedag("..."))
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
    A fork and a join, using numeric back references:
    >>> list(parsedag("+2*2*/2"))
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
    >>> list(parsedag("+2<2+1/2"))
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
    Placing a label:
    >>> list(parsedag("+1 :mylabel +1"))
    [('n', (0, [-1])), ('l', (0, 'mylabel')), ('n', (1, [0]))]
    An empty label (silly, really):
    >>> list(parsedag("+1:+1"))
    [('n', (0, [-1])), ('l', (0, '')), ('n', (1, [0]))]
    Fork and join, but with labels instead of numeric back references:
    >>> list(parsedag("+1:f +1:p2 *f */p2"))
    [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')),
     ('n', (2, [0])), ('n', (3, [2, 1]))]
    >>> list(parsedag("+1:f +1:p2 <f +1 /p2"))
    [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')),
     ('n', (2, [0])), ('n', (3, [2, 1]))]
    Restarting from the root:
    >>> list(parsedag("+1 $ +1"))
    [('n', (0, [-1])), ('n', (1, [-1]))]
    Annotations, which are meant to introduce sticky state for subsequent nodes:
    >>> list(parsedag("+1 @ann +1"))
    [('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))]
    >>> list(parsedag('+1 @"my annotation" +1'))
    [('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))]
    Commands, which are meant to operate on the most recently created node:
    >>> list(parsedag("+1 !cmd +1"))
    [('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))]
    >>> list(parsedag('+1 !"my command" +1'))
    [('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))]
    >>> list(parsedag('+1 !!my command line\\n +1'))
    [('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))]
    Comments, which extend to the end of the line:
    >>> list(parsedag('+1 # comment\\n+1'))
    [('n', (0, [-1])), ('n', (1, [0]))]
    Error:
    >>> try: list(parsedag('+1 bad'))
    ... except Exception, e: print e
    invalid character in dag description: bad...
    '''
    if not desc:
        return
    # characters allowed in an unquoted string element
    wordchars = string.ascii_letters + string.digits
    # label name -> node id of the node it was placed on
    labels = {}
    # current default parent; -1 means "root" (no parent)
    p1 = -1
    # id that the next created node will receive
    r = 0
    def resolve(ref):
        # map a backref string to a node id (see "A backref is either" above)
        if not ref:
            return p1
        elif ref[0] in string.digits:
            # numeric backref counts backwards from the next id
            return r - int(ref)
        else:
            return labels[ref]
    # character stream over the description; '\0' is the end-of-input
    # sentinel so the main loop never has to special-case StopIteration
    chiter = (c for c in desc)
    def nextch():
        try:
            return chiter.next()
        except StopIteration:
            return '\0'
    def nextrun(c, allow):
        # consume a run of characters drawn from `allow`; returns the first
        # non-matching character plus the consumed run
        s = ''
        while c in allow:
            s += c
            c = nextch()
        return c, s
    def nextdelimited(c, limit, escape):
        # consume up to (and including) the `limit` character, honoring the
        # escape character; returns the character after the delimiter
        s = ''
        while c != limit:
            if c == escape:
                c = nextch()
            s += c
            c = nextch()
        return nextch(), s
    def nextstring(c):
        # a string element is either quoted (with backslash escapes) or a
        # plain alphanumeric run
        if c == '"':
            return nextdelimited(nextch(), '"', '\\')
        else:
            return nextrun(c, wordchars)
    c = nextch()
    while c != '\0':
        while c in string.whitespace:
            c = nextch()
        if c == '.':
            # single node on the default parent
            yield 'n', (r, [p1])
            p1 = r
            r += 1
            c = nextch()
        elif c == '+':
            # linear run of n nodes
            c, digs = nextrun(nextch(), string.digits)
            n = int(digs)
            for i in xrange(0, n):
                yield 'n', (r, [p1])
                p1 = r
                r += 1
        elif c in '*/':
            # fork ('*') or merge ('/'): collect one backref per parent
            if c == '*':
                c = nextch()
            c, pref = nextstring(c)
            prefs = [pref]
            while c == '/':
                c, pref = nextstring(nextch())
                prefs.append(pref)
            ps = [resolve(ref) for ref in prefs]
            yield 'n', (r, ps)
            p1 = r
            r += 1
        elif c == '<':
            # reset the default parent to a backref
            c, ref = nextstring(nextch())
            p1 = resolve(ref)
        elif c == ':':
            # label the preceding node (labels may be redefined)
            c, name = nextstring(nextch())
            labels[name] = p1
            yield 'l', (p1, name)
        elif c == '@':
            yield 'a', text
        elif c == '!':
            c = nextch()
            if c == '!':
                # "!!" action extends to end of line
                cmd = ''
                c = nextch()
                while c not in '\n\r\0':
                    cmd += c
                    c = nextch()
                yield 'C', cmd
            else:
                c, cmd = nextstring(c)
                yield 'c', cmd
        elif c == '#':
            # comment: skip to end of line
            while c not in '\n\r\0':
                c = nextch()
        elif c == '$':
            # reset default parent to root
            p1 = -1
            c = nextch()
        elif c == '\0':
            return # in case it was preceded by whitespace
        else:
            # unparsable input: report up to 10 characters of context
            s = ''
            i = 0
            while c != '\0' and i < 10:
                s += c
                i += 1
                c = nextch()
            raise util.Abort(_('invalid character in dag description: '
                               '%s...') % s)
def dagtextlines(events,
                 addspaces=True,
                 wraplabels=False,
                 wrapannotations=False,
                 wrapcommands=False,
                 wrapnonlinear=False,
                 usedots=False,
                 maxlinewidth=70):
    '''generates single lines for dagtext()

    Consumes the event stream produced by parsedag() and yields text lines,
    wrapped at maxlinewidth characters.  Runs of linear nodes are collapsed
    into "+n" (or one "." per node when usedots is set); the wrap* flags
    force a line break after the corresponding event kinds.
    '''
    def wrapstring(text):
        # strictly lowercase-alphanumeric strings need no quoting
        if re.match("^[0-9a-z]*$", text):
            return text
        # quote the string, escaping backslashes and embedded double quotes
        # so parsedag()'s nextdelimited() can round-trip it.  (Previously the
        # quote replacement used '\"' == '"', a no-op that left embedded
        # quotes unescaped.)
        return '"' + text.replace('\\', '\\\\').replace('"', '\\"') + '"'
    def gen():
        labels = {}        # node id -> label name, for symbolic backrefs
        run = 0            # length of the current linear run
        wantr = 0          # id the next node event must carry
        needroot = False   # True once the first root has been seen
        for kind, data in events:
            if kind == 'n':
                r, ps = data
                # sanity check
                if r != wantr:
                    raise util.Abort(_("expected id %i, got %i") % (wantr, r))
                if not ps:
                    ps = [-1]
                else:
                    for p in ps:
                        if p >= r:
                            raise util.Abort(_("parent id %i is larger than "
                                               "current id %i") % (p, r))
                wantr += 1
                # new root?
                p1 = r - 1
                if len(ps) == 1 and ps[0] == -1:
                    if needroot:
                        # flush the pending run, then restart from the root
                        if run:
                            yield '+' + str(run)
                            run = 0
                        if wrapnonlinear:
                            yield '\n'
                        yield '$'
                        p1 = -1
                    else:
                        needroot = True
                if len(ps) == 1 and ps[0] == p1:
                    # linear continuation of the previous node
                    if usedots:
                        yield "."
                    else:
                        run += 1
                else:
                    # fork/merge: flush the run and emit explicit backrefs
                    if run:
                        yield '+' + str(run)
                        run = 0
                    if wrapnonlinear:
                        yield '\n'
                    prefs = []
                    for p in ps:
                        if p == p1:
                            prefs.append('')
                        elif p in labels:
                            prefs.append(labels[p])
                        else:
                            prefs.append(str(r - p))
                    yield '*' + '/'.join(prefs)
            else:
                # any non-node event terminates the current linear run
                if run:
                    yield '+' + str(run)
                    run = 0
                if kind == 'l':
                    rid, name = data
                    labels[rid] = name
                    yield ':' + name
                    if wraplabels:
                        yield '\n'
                elif kind == 'c':
                    yield '!' + wrapstring(data)
                    if wrapcommands:
                        yield '\n'
                elif kind == 'C':
                    # line commands always end the line
                    yield '!!' + data
                    yield '\n'
                elif kind == 'a':
                    if wrapannotations:
                        yield '\n'
                    yield '@' + wrapstring(data)
                elif kind == '#':
                    yield '#' + data
                    yield '\n'
                else:
                    # report the offending event kind (previously this
                    # interpolated the builtin `type` instead of `kind`)
                    raise util.Abort(_("invalid event type in dag: %s")
                                     % str((kind, data)))
        if run:
            yield '+' + str(run)
    # assemble the generated parts into width-limited lines
    line = ''
    for part in gen():
        if part == '\n':
            if line:
                yield line
                line = ''
        else:
            if len(line) + len(part) >= maxlinewidth:
                yield line
                line = ''
            elif addspaces and line and part != '.':
                line += ' '
            line += part
    if line:
        yield line
def dagtext(dag,
            addspaces=True,
            wraplabels=False,
            wrapannotations=False,
            wrapcommands=False,
            wrapnonlinear=False,
            usedots=False,
            maxlinewidth=70):
    '''generates lines of a textual representation for a dag event stream

    ``dag`` should generate the same events parsedag() yields:
    ('n', (id, [parentids])) for node creation
    ('l', (id, labelname)) for labels on nodes
    ('a', text) for annotations
    ('c', text) for commands
    ('C', text) for line commands ('!!')
    ('#', text) for comment lines
    Parent nodes must come before child nodes.

    >>> dagtext([('n', (0, [-1])), ('n', (1, [0]))])
    '+2'
    >>> dagtext([('n', (0, [-1])), ('n', (1, [-1]))])
    '+1 $ +1'
    >>> dagtext([('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])),
    ...          ('n', (3, [2, 1]))])
    '+2 *2 */2'
    >>> dagtext([('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))])
    '+1 !"my command" +1'
    >>> dagtext([])
    ''
    Combining parsedag and dagtext:
    >>> dagtext(parsedag('+1 :f +1 :p2 *f */p2'))
    '+1 :f +1 :p2 *f */p2'
    '''
    # delegate the actual rendering/wrapping to dagtextlines() and join
    # the produced lines
    lines = dagtextlines(dag,
                         addspaces,
                         wraplabels,
                         wrapannotations,
                         wrapcommands,
                         wrapnonlinear,
                         usedots,
                         maxlinewidth)
    return "\n".join(lines)
| |
import unittest
from transpose import transpose
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
class TransposeTest(unittest.TestCase):
    """Exercise transpose() against the canonical exercism cases."""

    def _check(self, text, expected):
        # Shared single-assertion helper used by every case below.
        self.assertEqual(transpose(text), expected)

    def test_empty_string(self):
        self._check("", "")

    def test_two_characters_in_a_row(self):
        self._check("A1", "A\n1")

    def test_two_characters_in_a_column(self):
        self._check("A\n1", "A1")

    def test_simple(self):
        self._check("ABC\n123", "A1\nB2\nC3")

    def test_single_line(self):
        # Transposing a single row yields one character per output line.
        self._check("Single line.", "\n".join("Single line."))

    def test_first_line_longer_than_second_line(self):
        text = "The fourth line.\nThe fifth line."
        expected = "\n".join([
            "TT", "hh", "ee", "  ", "ff", "oi", "uf", "rt",
            "th", "h ", " l", "li", "in", "ne", "e.", ".",
        ])
        self._check(text, expected)

    def test_second_line_longer_than_first_line(self):
        text = "The first line.\nThe second line."
        expected = "\n".join([
            "TT", "hh", "ee", "  ", "fs", "ie", "rc", "so",
            "tn", " d", "l ", "il", "ni", "en", ".e", " .",
        ])
        self._check(text, expected)

    def test_square(self):
        # A symmetric word square transposes to itself.
        grid = "\n".join(["HEART", "EMBER", "ABUSE", "RESIN", "TREND"])
        self._check(grid, grid)

    def test_rectangle(self):
        text = "\n".join(["FRACTURE", "OUTLINED", "BLOOMING", "SEPTETTE"])
        expected = "\n".join(
            ["FOBS", "RULE", "ATOP", "CLOT", "TIME", "UNIT", "RENT", "EDGE"])
        self._check(text, expected)

    def test_triangle(self):
        text = "\n".join(["T", "EE", "AAA", "SSSS", "EEEEE", "RRRRRR"])
        expected = "\n".join([
            "TEASER",
            " EASER",
            "  ASER",
            "   SER",
            "    ER",
            "     R",
        ])
        self._check(text, expected)

    def test_mixed_line_length(self):
        text = "\n".join([
            "The longest line.",
            "A long line.",
            "A longer line.",
            "A line.",
        ])
        expected = "\n".join([
            "TAAA", "h   ", "elll", " ooi", "lnnn", "ogge",
            "n e.", "glr", "ei ", "snl", "tei", " .n",
            "l e", "i .", "n", "e", ".",
        ])
        self._check(text, expected)
# Run the suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
"""Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import print_function, division
from sympy.core import S, sympify, pi
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import oo, Rational
from sympy.core.compatibility import range
from sympy.core.symbol import Dummy
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.geometry.exceptions import GeometryError
from sympy.polys import Poly, PolynomialError, DomainError
from sympy.polys.polyutils import _nsort, _not_a_coeff
from sympy.solvers import solve
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from .entity import GeometryEntity, GeometrySet
from .point import Point
from .line import LinearEntity, Line
from .util import _symbol, idiff
import random
from sympy.utilities.decorator import doctest_depends_on
class Ellipse(GeometrySet):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
When symbolic value for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point2D(3, 1), 3, 9/5)
Plotting:
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Circle, Segment
>>> c1 = Circle(Point(0,0), 1)
>>> Plot(c1) # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
>>> p = Plot() # doctest: +SKIP
>>> p[0] = c1 # doctest: +SKIP
>>> radius = Segment(c1.center, c1.random_point())
>>> p[1] = radius # doctest: +SKIP
>>> p # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
def __new__(
cls, center=None, hradius=None, vradius=None, eccentricity=None,
**kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
eccentricity = sympify(eccentricity)
if center is None:
center = Point(0, 0)
else:
center = Point(center)
if len(center) != 2:
raise ValueError('The center of "{0}" must be a two dimensional point'.format(cls))
if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
raise ValueError('Exactly two arguments of "hradius", '
'"vradius", and "eccentricity" must not be None."')
if eccentricity is not None:
if hradius is None:
hradius = vradius / sqrt(1 - eccentricity**2)
elif vradius is None:
vradius = hradius * sqrt(1 - eccentricity**2)
if hradius == vradius:
return Circle(center, hradius, **kwargs)
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
    @property
    def ambient_dimension(self):
        """Dimension of the space the ellipse is embedded in (always 2)."""
        return 2
    @property
    def center(self):
        """The center of the ellipse.
        Returns
        =======
        center : Point
        See Also
        ========
        sympy.geometry.point.Point
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.center
        Point2D(0, 0)
        """
        # args layout is (center, hradius, vradius)
        return self.args[0]
    @property
    def hradius(self):
        """The horizontal radius of the ellipse.
        Returns
        =======
        hradius : number
        See Also
        ========
        vradius, major, minor
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.hradius
        3
        """
        # args layout is (center, hradius, vradius)
        return self.args[1]
    @property
    def vradius(self):
        """The vertical radius of the ellipse.
        Returns
        =======
        vradius : number
        See Also
        ========
        hradius, major, minor
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.vradius
        1
        """
        # args layout is (center, hradius, vradius)
        return self.args[2]
@property
def minor(self):
"""Shorter axis of the ellipse (if it can be determined) else vradius.
Returns
=======
minor : number or expression
See Also
========
hradius, vradius, major
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.minor
1
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).minor
b
>>> Ellipse(p1, b, a).minor
a
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).minor
m
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = a - b < 0
if o == True:
return a
elif o == False:
return b
return self.vradius
@property
def major(self):
"""Longer axis of the ellipse (if it can be determined) else hradius.
Returns
=======
major : number or expression
See Also
========
hradius, vradius, minor
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.major
3
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).major
a
>>> Ellipse(p1, b, a).major
b
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).major
m + 1
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = b - a < 0
if o == True:
return a
elif o == False:
return b
return self.hradius
    @property
    def area(self):
        """The area of the ellipse.
        Returns
        =======
        area : number
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.area
        3*pi
        """
        # pi*a*b; simplify tidies products of symbolic radii
        return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
"""
from sympy import Integral
if self.eccentricity == 1:
return 2*pi*self.hradius
else:
x = Dummy('x', real=True)
return 4*self.major*Integral(
sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
    @property
    def eccentricity(self):
        """The eccentricity of the ellipse.
        Returns
        =======
        eccentricity : number
        Examples
        ========
        >>> from sympy import Point, Ellipse, sqrt
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, sqrt(2))
        >>> e1.eccentricity
        sqrt(7)/3
        """
        # e = c / a, with c the center-to-focus distance and a the major radius
        return self.focus_distance / self.major
    @property
    def periapsis(self):
        """The periapsis of the ellipse.
        The shortest distance between the focus and the contour.
        Returns
        =======
        periapsis : number
        See Also
        ========
        apoapsis : Returns greatest distance between focus and contour
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.periapsis
        -2*sqrt(2) + 3
        """
        # a*(1 - e) == a - c
        return self.major * (1 - self.eccentricity)
    @property
    def apoapsis(self):
        """The apoapsis of the ellipse.
        The greatest distance between the focus and the contour.
        Returns
        =======
        apoapsis : number
        See Also
        ========
        periapsis : Returns shortest distance between foci and contour
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.apoapsis
        2*sqrt(2) + 3
        """
        # a*(1 + e) == a + c
        return self.major * (1 + self.eccentricity)
    @property
    def focus_distance(self):
        """The focal distance of the ellipse.
        The distance between the center and one focus.
        Returns
        =======
        focus_distance : number
        See Also
        ========
        foci
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.focus_distance
        2*sqrt(2)
        """
        # both foci are equidistant from the center; use the first
        return Point.distance(self.center, self.foci[0])
    @property
    def foci(self):
        """The foci of the ellipse.
        Notes
        -----
        The foci can only be calculated if the major/minor axes are known.
        Raises
        ======
        ValueError
            When the major and minor axis cannot be determined.
        See Also
        ========
        sympy.geometry.point.Point
        focus_distance : Returns the distance between focus and center
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.foci
        (Point2D(-2*sqrt(2), 0), Point2D(2*sqrt(2), 0))
        """
        c = self.center
        hr, vr = self.hradius, self.vradius
        # equal radii: a circle, whose two foci coincide with the center
        if hr == vr:
            return (c, c)
        # calculate focus distance manually, since focus_distance calls this
        # routine
        fd = sqrt(self.major**2 - self.minor**2)
        if hr == self.minor:
            # foci on the y-axis
            return (c + Point(0, -fd), c + Point(0, fd))
        elif hr == self.major:
            # foci on the x-axis
            return (c + Point(-fd, 0), c + Point(fd, 0))
        # NOTE(review): if neither comparison holds (symbolically
        # undecidable axes) this falls through and implicitly returns
        # None rather than raising the documented ValueError -- confirm.
    @property
    def bounds(self):
        """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
        rectangle for the geometric figure.
        """
        h, v = self.hradius, self.vradius
        # axis-aligned, so the extrema lie at center +/- the two radii
        return (self.center.x - h, self.center.y - v, self.center.x + h, self.center.y + v)
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
Note: since the general ellipse is not supported, only rotations that
are integer multiples of pi/2 are allowed.
Examples
========
>>> from sympy import Ellipse, pi
>>> Ellipse((1, 0), 2, 1).rotate(pi/2)
Ellipse(Point2D(0, 1), 1, 2)
>>> Ellipse((1, 0), 2, 1).rotate(pi)
Ellipse(Point2D(-1, 0), 2, 1)
"""
if self.hradius == self.vradius:
return self.func(*self.args)
if (angle/S.Pi).is_integer:
return super(Ellipse, self).rotate(angle, pt)
if (2*angle/S.Pi).is_integer:
return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
# XXX see https://github.com/sympy/sympy/issues/2815 for general ellipes
raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.')
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the major and minor
axes which must be scaled and they are not GeometryEntities.
Examples
========
>>> from sympy import Ellipse
>>> Ellipse((0, 0), 2, 1).scale(2, 4)
Circle(Point2D(0, 0), 4)
>>> Ellipse((0, 0), 2, 1).scale(2)
Ellipse(Point2D(0, 0), 4, 1)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
h = self.hradius
v = self.vradius
return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
    def reflect(self, line):
        """Override GeometryEntity.reflect since the radius
        is not a GeometryEntity.
        Examples
        ========
        >>> from sympy import Circle, Line
        >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
        Circle(Point2D(1, 0), -1)
        >>> from sympy import Ellipse, Line, Point
        >>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
        Traceback (most recent call last):
        ...
        NotImplementedError:
        General Ellipse is not supported but the equation of the reflected
        Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
        37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1
        Notes
        =====
        Until the general ellipse (with no axis parallel to the x-axis) is
        supported a NotImplemented error is raised and the equation whose
        zeros define the rotated ellipse is given.
        """
        from .util import _uniquely_named_symbol
        # reflecting across a horizontal or vertical line keeps the axes
        # axis-aligned, so only the center needs to move
        if line.slope in (0, oo):
            c = self.center
            c = c.reflect(line)
            # NOTE(review): the horizontal radius is negated here (the first
            # doctest above shows a Circle with radius -1); this looks like a
            # latent sign bug -- confirm against upstream before relying on
            # the returned radii.
            return self.func(c, -self.hradius, self.vradius)
        else:
            # an arbitrarily tilted ellipse cannot be represented by this
            # class, so report the implicit equation of the reflection
            x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
            expr = self.equation(x, y)
            p = Point(x, y).reflect(line)
            result = expr.subs(zip((x, y), p.args
                ), simultaneous=True)
            raise NotImplementedError(filldedent(
                'General Ellipse is not supported but the equation '
                'of the reflected Ellipse is given by the zeros of: ' +
                "f(%s, %s) = %s" % (str(x), str(y), str(result))))
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
-----
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Ellipse, S
>>> from sympy.abc import t
>>> e = Ellipse((0, 0), 3, 2)
>>> e.encloses_point((0, 0))
True
>>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
False
>>> e.encloses_point((4, 0))
False
"""
p = Point(p)
if p in self:
return False
if len(self.foci) == 2:
# if the combined distance from the foci to p (h1 + h2) is less
# than the combined distance from the foci to the minor axis
# (which is the same as the major axis length) then p is inside
# the ellipse
h1, h2 = [f.distance(p) for f in self.foci]
test = 2*self.major - (h1 + h2)
else:
test = self.radius - self.center.distance(p)
return fuzzy_bool(test.is_positive)
    @doctest_depends_on(modules=('pyglet',))
    def tangent_lines(self, p):
        """Tangent lines between `p` and the ellipse.
        If `p` is on the ellipse, returns the tangent line through point `p`.
        Otherwise, returns the tangent line(s) from `p` to the ellipse, or
        None if no tangent line is possible (e.g., `p` inside ellipse).
        Parameters
        ==========
        p : Point
        Returns
        =======
        tangent_lines : list with 1 or 2 Lines
        Raises
        ======
        NotImplementedError
            Can only find tangent lines for a point, `p`, on the ellipse.
        See Also
        ========
        sympy.geometry.point.Point, sympy.geometry.line.Line
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> e1 = Ellipse(Point(0, 0), 3, 2)
        >>> e1.tangent_lines(Point(3, 0))
        [Line(Point2D(3, 0), Point2D(3, -12))]
        >>> # This will plot an ellipse together with a tangent line.
        >>> from sympy.plotting.pygletplot import PygletPlot as Plot
        >>> from sympy import Point, Ellipse
        >>> e = Ellipse(Point(0,0), 3, 2)
        >>> t = e.tangent_lines(e.random_point())
        >>> p = Plot()
        >>> p[0] = e # doctest: +SKIP
        >>> p[1] = t # doctest: +SKIP
        """
        p = Point(p)
        if self.encloses_point(p):
            # no tangents exist from an interior point
            return []
        if p in self:
            # p is on the ellipse: construct a second point along the
            # tangent direction (perpendicular to the gradient at p)
            delta = self.center - p
            rise = (self.vradius ** 2)*delta.x
            run = -(self.hradius ** 2)*delta.y
            p2 = Point(simplify(p.x + run),
                       simplify(p.y + rise))
            return [Line(p, p2)]
        else:
            if len(self.foci) == 2:
                f1, f2 = self.foci
                # NOTE(review): uses hradius where encloses_point uses
                # self.major; for an ellipse with vradius > hradius this
                # interior test looks wrong -- confirm upstream.
                maj = self.hradius
                test = (2*maj -
                        Point.distance(f1, p) -
                        Point.distance(f2, p))
            else:
                test = self.radius - Point.distance(self.center, p)
            if test.is_number and test.is_positive:
                return []
            # else p is outside the ellipse or we can't tell. In case of the
            # latter, the solutions returned will only be valid if
            # the point is not inside the ellipse; if it is, nan will result.
            x, y = Dummy('x'), Dummy('y')
            eq = self.equation(x, y)
            dydx = idiff(eq, y, x)
            # tangency condition: the slope from p to the tangent point
            # equals the curve's slope there
            slope = Line(p, Point(x, y)).slope
            tangent_points = solve([slope - dydx, eq], [x, y])
            # handle horizontal and vertical tangent lines
            if len(tangent_points) == 1:
                assert tangent_points[0][
                    0] == p.x or tangent_points[0][1] == p.y
                return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]
            # others
            return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
    def is_tangent(self, o):
        """Is `o` tangent to the ellipse?
        Parameters
        ==========
        o : GeometryEntity
            An Ellipse, LinearEntity or Polygon
        Raises
        ======
        NotImplementedError
            When the wrong type of argument is supplied.
        Returns
        =======
        is_tangent: boolean
            True if o is tangent to the ellipse, False otherwise.
        See Also
        ========
        tangent_lines
        Examples
        ========
        >>> from sympy import Point, Ellipse, Line
        >>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
        >>> e1 = Ellipse(p0, 3, 2)
        >>> l1 = Line(p1, p2)
        >>> e1.is_tangent(l1)
        True
        """
        inter = None
        if isinstance(o, Ellipse):
            inter = self.intersection(o)
            if isinstance(inter, Ellipse):
                # coincident ellipses overlap everywhere; not tangent
                return False
            # tangent iff the intersection is a single Point
            return (inter is not None and len(inter) == 1
                    and isinstance(inter[0], Point))
        elif isinstance(o, LinearEntity):
            # a line is tangent iff it meets the ellipse in exactly one
            # point that actually lies on the (possibly bounded) entity
            inter = self._do_line_intersection(o)
            if inter is not None and len(inter) == 1:
                return inter[0] in o
            else:
                return False
        elif isinstance(o, Polygon):
            # NOTE(review): Polygon is not imported in this module's visible
            # header -- confirm where the name is expected to come from.
            # count touch points across all sides; tangent iff exactly one
            c = 0
            for seg in o.sides:
                inter = self._do_line_intersection(seg)
                c += len([True for point in inter if point in seg])
            return c == 1
        else:
            raise NotImplementedError("Unknown argument type")
    def normal_lines(self, p, prec=None):
        """Normal lines between `p` and the ellipse.
        Parameters
        ==========
        p : Point
        Returns
        =======
        normal_lines : list with 1, 2 or 4 Lines
        Examples
        ========
        >>> from sympy import Line, Point, Ellipse
        >>> e = Ellipse((0, 0), 2, 3)
        >>> c = e.center
        >>> e.normal_lines(c + Point(1, 0))
        [Line(Point2D(0, 0), Point2D(1, 0))]
        >>> e.normal_lines(c)
        [Line(Point2D(0, 0), Point2D(0, 1)), Line(Point2D(0, 0), Point2D(1, 0))]
        Off-axis points require the solution of a quartic equation. This
        often leads to very large expressions that may be of little practical
        use. An approximate solution of `prec` digits can be obtained by
        passing in the desired value:
        >>> e.normal_lines((3, 3), prec=2)
        [Line(Point2D(-38/47, -85/31), Point2D(9/47, -21/17)),
        Line(Point2D(19/13, -43/21), Point2D(32/13, -8/3))]
        Whereas the above solution has an operation count of 12, the exact
        solution has an operation count of 2020.
        """
        p = Point(p)
        # XXX change True to something like self.angle == 0 if the arbitrarily
        # rotated ellipse is introduced.
        # https://github.com/sympy/sympy/issues/2815)
        if True:
            rv = []
            # points on the axes through the center have axis-aligned normals
            if p.x == self.center.x:
                rv.append(Line(self.center, slope=oo))
            if p.y == self.center.y:
                rv.append(Line(self.center, slope=0))
            if rv:
                # at these special orientations of p either 1 or 2 normals
                # exist and we are done
                return rv
        # find the 4 normal points and construct lines through them with
        # the corresponding slope
        x, y = Dummy('x', real=True), Dummy('y', real=True)
        eq = self.equation(x, y)
        dydx = idiff(eq, y, x)
        # the normal slope is the negative reciprocal of the tangent slope
        norm = -1/dydx
        # require the line from p to (x, y) to have the normal slope there
        slope = Line(p, (x, y)).slope
        seq = slope - norm
        # eliminate y, leaving a (generally quartic) polynomial in x
        yis = solve(seq, y)[0]
        xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
        if len(xeq.free_symbols) == 1:
            try:
                # this is so much faster, it's worth a try
                xsol = Poly(xeq, x).real_roots()
            except (DomainError, PolynomialError, NotImplementedError):
                xsol = _nsort(solve(xeq, x), separated=True)[0]
            points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol]
        else:
            raise NotImplementedError(
                'intersections for the general ellipse are not supported')
        slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
        if prec is not None:
            # round both the foot points and the slopes to `prec` digits
            points = [pt.n(prec) for pt in points]
            slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes]
        return [Line(pt, slope=s) for pt,s in zip(points, slopes)]
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the functions.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.arbitrary_point()
Point2D(3*cos(t), 2*sin(t))
"""
t = _symbol(parameter)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name))
return Point(self.center.x + self.hradius*cos(t),
self.center.y + self.vradius*sin(t))
    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Ellipse.
        Parameters
        ==========
        parameter : str, optional
            Default value is 't'.
        Returns
        =======
        plot_interval : list
            [parameter, lower_bound, upper_bound]
        Examples
        ========
        >>> from sympy import Point, Ellipse
        >>> e1 = Ellipse(Point(0, 0), 3, 2)
        >>> e1.plot_interval()
        [t, -pi, pi]
        """
        t = _symbol(parameter)
        # one full sweep of the parametrization in arbitrary_point()
        return [t, -S.Pi, S.Pi]
    def random_point(self, seed=None):
        """A random point on the ellipse.
        Returns
        =======
        point : Point
        See Also
        ========
        sympy.geometry.point.Point
        arbitrary_point : Returns parameterized point on ellipse
        Notes
        -----
        A random point may not appear to be on the ellipse, ie, `p in e` may
        return False. This is because the coordinates of the point will be
        floating point values, and when these values are substituted into the
        equation for the ellipse the result may not be zero because of floating
        point rounding error.
        Examples
        ========
        >>> from sympy import Point, Ellipse, Segment
        >>> e1 = Ellipse(Point(0, 0), 3, 2)
        >>> e1.random_point() # gives some random point
        Point2D(...)
        >>> p1 = e1.random_point(seed=0); p1.n(2)
        Point2D(2.1, 1.4)
        The random_point method assures that the point will test as being
        in the ellipse:
        >>> p1 in e1
        True
        Notes
        =====
        An arbitrary_point with a random value of t substituted into it may
        not test as being on the ellipse because the expression tested that
        a point is on the ellipse doesn't simplify to zero and doesn't evaluate
        exactly to zero:
        >>> from sympy.abc import t
        >>> e1.arbitrary_point(t)
        Point2D(3*cos(t), 2*sin(t))
        >>> p2 = _.subs(t, 0.1)
        >>> p2 in e1
        False
        Note that arbitrary_point routine does not take this approach. A value
        for cos(t) and sin(t) (not t) is substituted into the arbitrary point.
        There is a small chance that this will give a point that will not
        test as being in the ellipse, so the process is repeated (up to 10
        times) until a valid point is obtained.
        """
        from sympy import sin, cos, Rational
        t = _symbol('t')
        x, y = self.arbitrary_point(t).args
        # get a random value in [-1, 1) corresponding to cos(t)
        # and confirm that it will test as being in the ellipse
        if seed is not None:
            # a dedicated generator makes the result reproducible per seed
            rng = random.Random(seed)
        else:
            rng = random
        for i in range(10): # should be enough?
            # simplify this now or else the Float will turn s into a Float
            c = 2*Rational(rng.random()) - 1
            # pick the matching sin(t) on the upper semicircle for cos(t) = c
            s = sqrt(1 - c**2)
            # substitute the exact rational values for cos(t)/sin(t), not t
            p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
            if p1 in self:
                return p1
        raise GeometryError(
            'Having problems generating a point in the ellipse.')
def equation(self, x='x', y='y'):
"""The equation of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
See Also
========
arbitrary_point : Returns parameterized point on ellipse
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.equation()
y**2/4 + (x/3 - 1/3)**2 - 1
"""
x = _symbol(x)
y = _symbol(y)
t1 = ((x - self.center.x) / self.hradius)**2
t2 = ((y - self.center.y) / self.vradius)**2
return t1 + t2 - 1
    def _do_line_intersection(self, o):
        """
        Find the intersection of a LinearEntity and the ellipse.
        All LinearEntities are treated as a line and filtered at
        the end to see that they lie in o.
        """
        # Substituting the parametrized line lp[0] + t*ldir into the ellipse
        # equation yields the quadratic a*t**2 + 2*b*t + c = 0 solved below.
        hr_sq = self.hradius ** 2
        vr_sq = self.vradius ** 2
        lp = o.points
        ldir = lp[1] - lp[0]
        diff = lp[0] - self.center
        # direction and offset rescaled by the squared radii (ellipse metric)
        mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
        mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)
        a = ldir.dot(mdir)
        b = ldir.dot(mdiff)
        c = diff.dot(mdiff) - 1
        det = simplify(b*b - a*c)
        result = []
        if det == 0:
            # tangent line: single (double) root
            t = -b / a
            result.append(lp[0] + (lp[1] - lp[0]) * t)
        # Definite and potential symbolic intersections are allowed.
        elif (det > 0) != False:
            # `(det > 0) != False` keeps candidates whose determinant is
            # symbolic (the comparison does not reduce to a plain bool).
            root = sqrt(det)
            t_a = (-b - root) / a
            t_b = (-b + root) / a
            result.append( lp[0] + (lp[1] - lp[0]) * t_a )
            result.append( lp[0] + (lp[1] - lp[0]) * t_b )
        # keep only the points actually on o (o may be a ray or segment)
        return [r for r in result if r in o]
def _do_ellipse_intersection(self, o):
"""The intersection of an ellipse with another ellipse or a circle.
Private helper method for `intersection`.
"""
x = Dummy('x', real=True)
y = Dummy('y', real=True)
seq = self.equation(x, y)
oeq = o.equation(x, y)
result = solve([seq, oeq], [x, y])
return [Point(*r) for r in list(uniq(result))]
    def intersection(self, o):
        """The intersection of this ellipse and another geometrical entity
        `o`.
        Parameters
        ==========
        o : GeometryEntity
        Returns
        =======
        intersection : list of GeometryEntity objects
        Notes
        -----
        Currently supports intersections with Point, Line, Segment, Ray,
        Circle and Ellipse types.
        See Also
        ========
        sympy.geometry.entity.GeometryEntity
        Examples
        ========
        >>> from sympy import Ellipse, Point, Line, sqrt
        >>> e = Ellipse(Point(0, 0), 5, 7)
        >>> e.intersection(Point(0, 0))
        []
        >>> e.intersection(Point(5, 0))
        [Point2D(5, 0)]
        >>> e.intersection(Line(Point(0,0), Point(0, 1)))
        [Point2D(0, -7), Point2D(0, 7)]
        >>> e.intersection(Line(Point(5,0), Point(5, 1)))
        [Point2D(5, 0)]
        >>> e.intersection(Line(Point(6,0), Point(6, 1)))
        []
        >>> e = Ellipse(Point(-1, 0), 4, 3)
        >>> e.intersection(Ellipse(Point(1, 0), 4, 3))
        [Point2D(0, -3*sqrt(15)/4), Point2D(0, 3*sqrt(15)/4)]
        >>> e.intersection(Ellipse(Point(5, 0), 4, 3))
        [Point2D(2, -3*sqrt(7)/4), Point2D(2, 3*sqrt(7)/4)]
        >>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
        []
        >>> e.intersection(Ellipse(Point(0, 0), 3, 4))
        [Point2D(-363/175, -48*sqrt(111)/175), Point2D(-363/175, 48*sqrt(111)/175), Point2D(3, 0)]
        >>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
        [Point2D(-17/5, -12/5), Point2D(-17/5, 12/5), Point2D(7/5, -12/5), Point2D(7/5, 12/5)]
        """
        if isinstance(o, Point):
            # a point intersects iff it satisfies the ellipse equation
            if o in self:
                return [o]
            else:
                return []
        elif isinstance(o, LinearEntity):
            # LinearEntity may be a ray/segment, so check the points
            # of intersection for coincidence first
            return self._do_line_intersection(o)
        elif isinstance(o, Circle):
            return self._do_ellipse_intersection(o)
        elif isinstance(o, Ellipse):
            if o == self:
                # identical ellipses: the whole entity (not a list of points)
                return self
            else:
                return self._do_ellipse_intersection(o)
        # otherwise defer to the other entity's intersection logic
        return o.intersection(self)
def evolute(self, x='x', y='y'):
"""The equation of evolute of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.evolute()
2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
"""
if len(self.args) != 3:
raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
x = _symbol(x)
y = _symbol(y)
t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
def __eq__(self, o):
"""Is the other GeometryEntity the same as this ellipse?"""
return isinstance(o, GeometryEntity) and (self.center == o.center and
self.hradius == o.hradius and
self.vradius == o.vradius)
    def __hash__(self):
        # Defining __eq__ above would otherwise make instances unhashable
        # under Python 3 (__hash__ is set to None); delegate explicitly to
        # the superclass implementation to keep Ellipse hashable.
        return super(Ellipse, self).__hash__()
def __contains__(self, o):
if isinstance(o, Point):
x = Dummy('x', real=True)
y = Dummy('y', real=True)
res = self.equation(x, y).subs({x: o.x, y: o.y})
return trigsimp(simplify(res)) is S.Zero
elif isinstance(o, Ellipse):
return self == o
return False
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG ellipse element for the Ellipse.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
from sympy.core.evalf import N
c = N(self.center)
h, v = N(self.hradius), N(self.vradius)
return (
'<ellipse fill="{1}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" cx="{2}" cy="{3}" rx="{4}" ry="{5}"/>'
).format(2. * scale_factor, fill_color, c.x, c.y, h, v)
class Circle(Ellipse):
    """A circle in space.
    Constructed simply from a center and a radius, or from three
    non-collinear points.
    Parameters
    ==========
    center : Point
    radius : number or sympy expression
    points : sequence of three Points
    Attributes
    ==========
    radius (synonymous with hradius, vradius, major and minor)
    circumference
    equation
    Raises
    ======
    GeometryError
        When trying to construct circle from three collinear points.
        When trying to construct circle from incorrect parameters.
    See Also
    ========
    Ellipse, sympy.geometry.point.Point
    Examples
    ========
    >>> from sympy.geometry import Point, Circle
    >>> # a circle constructed from a center and radius
    >>> c1 = Circle(Point(0, 0), 5)
    >>> c1.hradius, c1.vradius, c1.radius
    (5, 5, 5)
    >>> # a circle constructed from three points
    >>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
    >>> c2.hradius, c2.vradius, c2.radius, c2.center
    (sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point2D(1/2, 1/2))
    """
    def __new__(cls, *args, **kwargs):
        # Two construction forms: three non-collinear points (circumscribed
        # circle of the triangle they form), or a (center, radius) pair.
        c, r = None, None
        if len(args) == 3:
            args = [Point(a) for a in args]
            if Point.is_collinear(*args):
                raise GeometryError(
                    "Cannot construct a circle from three collinear points")
            from .polygon import Triangle
            # the circle through three points is the triangle's circumcircle
            t = Triangle(*args)
            c = t.circumcenter
            r = t.circumradius
        elif len(args) == 2:
            # Assume (center, radius) pair
            c = Point(args[0])
            r = sympify(args[1])
        if not (c is None or r is None):
            # store args as (center, radius); see the radius property below
            return GeometryEntity.__new__(cls, c, r, **kwargs)
        raise GeometryError("Circle.__new__ received unknown arguments")
    @property
    def radius(self):
        """The radius of the circle.
        Returns
        =======
        radius : number or sympy expression
        See Also
        ========
        Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
        Examples
        ========
        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(3, 4), 6)
        >>> c1.radius
        6
        """
        # args is (center, radius) as stored by __new__
        return self.args[1]
    @property
    def vradius(self):
        """
        This Ellipse property is an alias for the Circle's radius.
        Whereas hradius, major and minor can use Ellipse's conventions,
        the vradius does not exist for a circle. It is always a positive
        value in order that the Circle, like Polygons, will have an
        area that can be positive or negative as determined by the sign
        of the hradius.
        Examples
        ========
        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(3, 4), 6)
        >>> c1.vradius
        6
        """
        return abs(self.radius)
    @property
    def circumference(self):
        """The circumference of the circle.
        Returns
        =======
        circumference : number or SymPy expression
        Examples
        ========
        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(3, 4), 6)
        >>> c1.circumference
        12*pi
        """
        return 2 * S.Pi * self.radius
    def equation(self, x='x', y='y'):
        """The equation of the circle.
        Parameters
        ==========
        x : str or Symbol, optional
            Default value is 'x'.
        y : str or Symbol, optional
            Default value is 'y'.
        Returns
        =======
        equation : SymPy expression
        Examples
        ========
        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(0, 0), 5)
        >>> c1.equation()
        x**2 + y**2 - 25
        """
        x = _symbol(x)
        y = _symbol(y)
        t1 = (x - self.center.x)**2
        t2 = (y - self.center.y)**2
        # self.major is the radius for a Circle
        return t1 + t2 - self.major**2
    def intersection(self, o):
        """The intersection of this circle with another geometrical entity.
        Parameters
        ==========
        o : GeometryEntity
        Returns
        =======
        intersection : list of GeometryEntities
        Examples
        ========
        >>> from sympy import Point, Circle, Line, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
        >>> p4 = Point(5, 0)
        >>> c1 = Circle(p1, 5)
        >>> c1.intersection(p2)
        []
        >>> c1.intersection(p4)
        [Point2D(5, 0)]
        >>> c1.intersection(Ray(p1, p2))
        [Point2D(5*sqrt(2)/2, 5*sqrt(2)/2)]
        >>> c1.intersection(Line(p2, p3))
        []
        """
        if isinstance(o, Circle):
            if o.center == self.center:
                if o.radius == self.radius:
                    # identical circles: the whole entity is the intersection
                    return o
                # concentric circles with different radii never meet
                return []
            dx, dy = (o.center - self.center).args
            d = sqrt(simplify(dy**2 + dx**2))
            R = o.radius + self.radius
            # too far apart, or one circle strictly inside the other
            if d > R or d < abs(self.radius - o.radius):
                return []
            # standard two-circle intersection: a is the distance along the
            # center line to the chord joining the intersection points, h is
            # the half-length of that chord
            a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))
            x2 = self.center.x + (dx * a/d)
            y2 = self.center.y + (dy * a/d)
            h = sqrt(simplify(self.radius**2 - a**2))
            rx = -dy * (h/d)
            ry = dx * (h/d)
            xi_1 = simplify(x2 + rx)
            xi_2 = simplify(x2 - rx)
            yi_1 = simplify(y2 + ry)
            yi_2 = simplify(y2 - ry)
            ret = [Point(xi_1, yi_1)]
            if xi_1 != xi_2 or yi_1 != yi_2:
                # tangent circles yield a single point; skip the duplicate
                ret.append(Point(xi_2, yi_2))
            return ret
        return Ellipse.intersection(self, o)
    def scale(self, x=1, y=1, pt=None):
        """Override GeometryEntity.scale since the radius
        is not a GeometryEntity.
        Examples
        ========
        >>> from sympy import Circle
        >>> Circle((0, 0), 1).scale(2, 2)
        Circle(Point2D(0, 0), 2)
        >>> Circle((0, 0), 1).scale(2, 4)
        Ellipse(Point2D(0, 0), 2, 4)
        """
        c = self.center
        if pt:
            # scale about an arbitrary point: translate, scale, translate back
            pt = Point(pt)
            return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
        c = c.scale(x, y)
        x, y = [abs(i) for i in (x, y)]
        if x == y:
            # uniform scaling keeps a Circle
            return self.func(c, x*self.radius)
        # non-uniform scaling turns the circle into an Ellipse
        h = v = self.radius
        return Ellipse(c, hradius=h*x, vradius=v*y)
    def reflect(self, line):
        """Override GeometryEntity.reflect since the radius
        is not a GeometryEntity.
        Examples
        ========
        >>> from sympy import Circle, Line
        >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
        Circle(Point2D(1, 0), -1)
        """
        c = self.center
        c = c.reflect(line)
        # the radius is negated (see doctest) so the sign of the area flips
        # under reflection, as described in the vradius docstring
        return self.func(c, -self.radius)
from .polygon import Polygon
| |
#!/usr/bin/python
# Filename: watdb.py
'''
watdb nee trbotdb
@author: Andrew Philpot
@version 0.10
Usage: python watdb.py
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
'''
## major functionality in this module beginning trbot/wat merge (0.6)
##
## 1. layer above web.py's web.database or raw MySQLdb, using jsonconf
## 2. query support layer, tools for query assert/fetch/update invariants
##
##
import sys
import getopt
import jsonconf
import os
import util
from util import iterChunks
import re
## v 0.10
import web
assert web.__version__ == '0.37'
# _orig_interpolate=web.db._interpolate # needed?
def _interpolate_ignore_dollar_sign(format):
    """Replacement for web.db._interpolate that performs no interpolation.
    Returns the whole format string as a single literal chunk (offset 0),
    so '$' characters in SQL text are passed through untouched.
    """
    # print "enter _interpolate_ignore_dollar_sign"
    return [(0, format)]
# Monkey-patch web.py (asserted to be 0.37 above) with the no-op interpolator.
web.db._interpolate = _interpolate_ignore_dollar_sign
## end v 0.10
from web.db import sqlquote
from collections import defaultdict
from watlog import watlog
# Module-level logger, created via the project's watlog wrapper.
logger = watlog("wat.watdb")
logger.info('wat.watdb initialized')
# WE HAVE TWO ENGINES: MySQLdb and webpy
# note that MySQLdb is a zipped python egg and needs to be be able to
# uncompress into a python-eggs directory. For generality when
# running as a web server, I placed a directive in httpd.conf, but one
# could also do something like
# os.environ['PYTHON_EGG__CACHE'] = '/tmp/python-eggs'
import MySQLdb
import web
web.config.debug = False
VERSION = '0.10'
REVISION = "$Revision: 21852 $"
# Default settings consumed by the Watdb constructor below.
VERBOSE = True
COMMA = ", "
BACKSLASH="\x5c"
SINGLEQUOTE="\x27"
# Must prefix NUL, C-Z, single quote/apostrophe, backslash with backslash
ESCAPABLE="\x00\x1a\x27\x5c"
# Default engine name and configuration name for Watdb.
ENGINE = 'webpy'
CONF = 'test'
# Outcome codes returned as the second tuple element by WebpyWatdb.ensureId().
INSERTED=1
FETCHED=2
FETCHFAILED=3
SOLOINSERTNOOP=4
TESTING=5
# def kwoteValue(v):
# if str(v).upper() == "NULL":
# return "NULL"
# else:
# return (SINGLEQUOTE
# +
# "".join([BACKSLASH + c if c in ESCAPABLE else c for c in str(v)])
# +
# SINGLEQUOTE)
def kwoteValue(v):
    """Render *v* as a quoted SQL literal by delegating to web.py's sqlquote."""
    quoted = sqlquote(v)
    return str(quoted)
def wh(column_name, value, rel='='):
    """Build one parenthesized WHERE term, e.g. (`col` = 'val').
    Raises ValueError when *value* is falsy. The value is quoted via
    kwoteValue/sqlquote; is that good enough to prevent SQL injection?
    """
    if not value:
        raise ValueError
    return """(`%s` %s %s)""" % (column_name, rel, kwoteValue(value))
# 16 January 2013 by Philpot
# Intended as context manager for web.config.debug
# 17 January 2013 by Philpot
# I now believe web.config.debug only works when you do it at the beginning,
# before instantiating anything
class EngineVerbosity():
    """Context manager that temporarily sets web.config.debug.
    NOTE(review): per the original comments, the flag change is apparently
    not reflected in calls to web.db.query — web.py may read it only at
    handle-creation time; confirm before relying on this.
    """
    def __init__(self, setting):
        self._setting = setting
    def __enter__(self):
        # remember the previous flag so __exit__ can restore it
        self._old = web.config.debug
        web.config.debug = self._setting
        return web.config.debug
    def __exit__(self, type, value, traceback):
        web.config.debug = self._old
# Engine identifier -> class name; unknown engines fall back to base "Watdb".
watdbClassNames = defaultdict(lambda : "Watdb",
                              webpy = "WebpyWatdb",
                              MySQLdb = "MySQLdbWatdb")
def watdbClassName(engine):
    """Return the Watdb subclass name registered for *engine*."""
    return watdbClassNames[engine]
def watdbClass(engine):
    """Resolve the Watdb subclass object for *engine* from module globals."""
    return globals().get(watdbClassName(engine))
class Watdb(object):
    """Config-driven layer above web.py / MySQLdb database handles.
    The base class loads a jsonconf db config and then *specializes*
    itself: specialize() swaps self.__class__ to an engine-specific
    subclass (WebpyWatdb or MySQLdbWatdb) resolved via watdbClass().
    """
    def __init__(self, verbose=VERBOSE, conf=CONF, engine=ENGINE, test=False):
        self.verbose = verbose
        self.cfg = None
        self.conf = conf
        self.test = test
        if self.conf:
            self.readConfig()
        # Prefer the explicit engine argument, else the configured one.
        # Guard the cfg lookup: cfg stays None when conf was falsy, and the
        # previous unguarded self.cfg.get(...) raised AttributeError then.
        self.specialize(engine or (self.cfg.get('engine') if self.cfg else None))
    def __unicode__(self):
        # hasattr guards allow printing partially-initialized instances
        engine = hasattr(self, "engine") and self.engine
        conf = hasattr(self, "conf") and self.conf
        return '<Watdb %s %s>' % (engine, conf)
    def __str__(self):
        return self.__unicode__()
    def __repr__(self):
        return self.__unicode__()
    def readConfig(self):
        """Load the db config named by self.conf from ./conf via jsonconf."""
        root = os.path.join(sys.path[0], "conf")
        self.cfg = jsonconf.chooseJson(self.conf, 'db', root=root)
        self.engine = self.cfg['engine']
    def specialize(self, engine):
        """Swap this instance's class to the subclass registered for engine."""
        try:
            self.__class__ = watdbClass(engine)
            self.engine = engine
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow it to ordinary exceptions
            logger.warning("Failed to specialize %s to %s" % (self, engine))
    def testing(self):
        """Return True when test mode is on; a callable self.test is consulted."""
        test = hasattr(self, "test") and self.test
        if test:
            if hasattr(test, "__call__"):
                return test()
            else:
                return True
        return False
    def insertionQuery(self, table, formals, values):
        """Build an insert statement; INSERT IGNORE is MySQL specific..."""
        return ("INSERT IGNORE INTO `%s`" % table +
                " (" + COMMA.join(["`%s`" % formal for formal in formals]) + ") " +
                " VALUES (" + COMMA.join([self.kwote(value) for value in values]) + ")")
    def fetchQuery(self, table, *columnsAndValues):
        """Build a SELECT id query from alternating (column, value) pairs. untested"""
        j = " AND ".join(["(`%s`=%s)" % (column, self.kwote(value))
                          for column, value
                          in iterChunks(columnsAndValues, 2)])
        query = ("SELECT id FROM `%s`" % table
                 +
                 (" WHERE " if columnsAndValues else "")
                 +
                 j)
        return query
## rest of methods unported
# def maybeQuery(owner, sql):
# """assert SQL, return the record number (if succeed)"""
# if testing(owner):
# logger.info(sql)
# return -1
# else:
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# return db.insert_id() or None
# def maybeFetch(owner, sql):
# """assert SQL, return the record number (if succeed)"""
# if testing(owner):
# logger.info(sql)
# return []
# else:
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# rows = cur.fetchall()
# return rows
# def mqfi(owner, sql, table, *columnsAndValues):
# return maybeQuery(owner, sql) or fetchId(owner, table, *columnsAndValues)
# def fetchId(owner, table, *columnsAndValues):
# if testing(owner):
# return -1
# else:
# sql = ("select id from `%s` where" % table +
# " and ".join(["(`%s`=%s)" % (column,kwote(owner,value))
# for column,value
# in iterChunks(columnsAndValues, 2)]))
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# return cur.fetchone()[0]
# def updateFreq(owner, table, *columnsAndValues):
# if testing(owner):
# return -1
# else:
# sql = ("update " + table + " set `freq`=`freq`+1 where" +
# " and ".join(["(`%s`=%s)" % (column,kwote(owner,value))
# for column,value
# in iterChunks(columnsAndValues, 2)]))
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# def insertionQuery(owner, table, formals, values):
# return ("INSERT IGNORE INTO `%s`" % table +
# " (" + COMMA.join(["`%s`" % formal for formal in formals]) +") " +
# " VALUES (" + COMMA.join([kwote(owner, value) for value in values]) + ")")
# def fetchQuery(owner, table, *columnsAndValues):
# j = " AND ".join(["(`%s`=%s)" % (column,kwote(owner,value))
# for column,value
# in iterChunks(columnsAndValues, 2)])
# query = ("SELECT id FROM `%s`" % table
# +
# (" WHERE " if columnsAndValues else "")
# +
# j)
# return query
# def ensureId(owner, insert, fetch):
# test = owner.test
# if testing(owner):
# logger.info(insert or "" + "\n" + fetch or "")
# return -1
# else:
# db = owner.db
# cur = db.cursor()
# insert = re.sub(r"""%""", "%%", insert)
# cur.execute(insert, ())
# id = db.insert_id() or None
# if id and id>0:
# return id
# else:
# if fetch:
# fetch = re.sub(r"""%""", "%%", fetch)
# cur.execute(fetch, ())
# all = cur.fetchone()
# return all[0] if all else None
# else:
# logger.warning("solo insert was no-op")
# return None
# Process-wide cache of database connections, keyed by connection parameters.
cxns = {}
def findCxn(key):
    """Return the cached connection for *key*, or None when absent."""
    return cxns.get(key)
class WebpyWatdb(Watdb):
    """Watdb engine implemented on top of web.py's web.database handle.
    Instances are produced by Watdb.specialize(), which swaps __class__,
    hence the constructor warning below.
    """
    def __init__(self, verbose=VERBOSE, engine=ENGINE, conf=CONF):
        logger.warning("Don't call directly")
    def connect(self):
        """Attach a (possibly cached) web.database connection; return self."""
        cfg = self.cfg
        key = ("mysql", cfg['user'], cfg['password'], cfg['dsname'], cfg['host'])
        found = findCxn(key)
        if found:
            self.cxn = found
            self.cursor = lambda: self.cxn
            return self
        else:
            self.cxn = web.database(dbn='mysql', user=cfg['user'], passwd=cfg['password'], db=cfg['dsname'], host=cfg['host'])
            self.cursor = lambda: self.cxn
            # cache for reuse by other instances with the same parameters
            cxns[key] = self.cxn
            return self
    def disconnect(self):
        """Drop this instance's reference; the shared handle stays cached."""
        self.cxn = None
        return self.cxn
    def kwote(self, thing):
        """Quote a value for SQL via kwoteValue/sqlquote."""
        return kwoteValue(thing)
    def maybeFetch(self, sql):
        """assumes connected. assert SQL, return the rows"""
        if self.testing():
            logger.info(sql)
            return []
        else:
            with EngineVerbosity(self.verbose):
                rows = self.cxn.query(sql)
            return rows
    def maybeQuery(self, sql):
        """assumes connected. assert SQL, return the record number (if succeed)"""
        if self.testing():
            logger.info(sql)
            return -1
        else:
            lid = None
            with EngineVerbosity(self.verbose):
                succeed = self.cxn.query(sql)
                if succeed:
                    lid = int(self.cxn.query('select last_insert_id() as id')[0].id)
            return lid
    def fetchId(self, table, *columnsAndValues):
        """Return the id of the row matching the (column, value) pairs."""
        if self.testing():
            return -1
        else:
            sql = ("select id from `%s` where " % table +
                   " and ".join(["(`%s`=%s)" % (column, self.kwote(value))
                                 for column, value
                                 in iterChunks(columnsAndValues, 2)]))
            with EngineVerbosity(self.verbose):
                rows = self.cxn.query(sql)
            return rows and int(rows[0].id)
    def mqfi(self, sql, table, *columnsAndValues):
        """Insert via SQL (probably insertionQuery), else fetch the row id."""
        return self.maybeQuery(sql) or self.fetchId(table, *columnsAndValues)
    # is this ever used? It should be
    def updateFreq(self, table, *columnsAndValues):
        """Increment the `freq` column of the row matching the pairs."""
        if self.testing():
            return -1
        else:
            sql = ("update " + table + " set `freq`=`freq`+1 where " +
                   " and ".join(["(`%s`=%s)" % (column, self.kwote(value))
                                 for column, value
                                 in iterChunks(columnsAndValues, 2)]))
            with EngineVerbosity(self.verbose):
                self.cxn.query(sql)
    def ensureId(self, insert, fetch):
        """Run *insert*; on no-op fall back to *fetch*. Return (id, status)
        where status is one of INSERTED/FETCHED/FETCHFAILED/SOLOINSERTNOOP/
        TESTING (module constants)."""
        if self.testing():
            # was `insert or "" + "\n" + fetch or ""`: `or` binds looser than
            # `+`, so only `insert` was logged (and fetch=None could raise
            # TypeError); parenthesize so both statements are logged
            logger.info((insert or "") + "\n" + (fetch or ""))
            return (-1, TESTING)
        else:
            # escape literal % (e.g. for LIKE comparisons?)
            insert = re.sub(r"""%""", "%%", insert)
            lid = None
            with EngineVerbosity(self.verbose):
                # should wrap this in a transaction?
                succeed = self.cxn.query(insert)
                if succeed:
                    lid = self.cxn.query('select last_insert_id() as id')[0].id or None
            if lid and lid > 0:
                # Case I: freshly INSERTED
                return (int(lid), INSERTED)
            else:
                if fetch:
                    fetch = re.sub(r"""%""", "%%", fetch)
                    # renamed from `all`, which shadowed the builtin
                    row = self.cxn.query(fetch)[0]
                    if row:
                        # Case II: already there, FETCHED
                        return (int(row.id), FETCHED)
                    else:
                        # Case III: tried to fetch, found nothing
                        logger.warning("FETCH query matched nothing")
                        return (None, FETCHFAILED)
                else:
                    # Case IV: fetch not provided, i.e. insert was mandatory,
                    # but it failed
                    logger.warning("solo insert was no-op")
                    return (None, SOLOINSERTNOOP)
class MySQLdbWatdb(Watdb):
    """Watdb engine backed directly by a raw MySQLdb connection.
    Instances are produced by Watdb.specialize(), which swaps __class__,
    hence the constructor warning below.
    """
    def __init__(self, verbose=VERBOSE, engine=ENGINE, conf=CONF):
        logger.warning("Don't call directly")
    def connect(self):
        """Open a MySQLdb connection using the loaded config."""
        cfg = self.cfg
        self.cxn = MySQLdb.connect(passwd=cfg['password'], db=cfg['dsname'], user=cfg['user'], host=cfg['host'])
        self.cursor = self.cxn.cursor
    def disconnect(self):
        """Close the connection; closing an already-closed handle is a no-op."""
        try:
            self.cxn.close()
        # was `except MySQLdb.ProgrammingError, err:` — Python-2-only syntax
        # binding a name that was never used; drop the binding
        except MySQLdb.ProgrammingError:
            # don't stress closing a closed connection
            pass
        return self.cxn
    def kwote(self, thing):
        """Quote a value for SQL, preferring the live handle's escaper."""
        try:
            # to emulate old trbotdb, use MySQLdb handle's escape method
            return self.cxn.escape(util.emittable(thing), MySQLdb.converters.conversions)
        except AttributeError:
            # fall-through: use our own (like webpy case)
            return kwoteValue(thing)
    def maybeQuery(self, sql):
        """assumes connected. assert SQL, return the record number (if succeed)"""
        if self.testing():
            logger.info(sql)
            return -1
        else:
            cur = self.cxn.cursor()
            cur.execute(sql, ())
            return self.cxn.insert_id() or None
    def maybeFetch(self, sql):
        """assumes connected. assert SQL, return the rows"""
        if self.testing():
            logger.info(sql)
            return []
        else:
            cur = self.cxn.cursor()
            # this should work but is not; ### fix
            cur.execute(sql, ())
            rows = cur.fetchall()
            return rows
def main(argv=None):
'''this is called if run from command line'''
# process command line arguments
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "hv",
["echo=", "help",
"verbose"])
except getopt.error, msg:
print >> sys.stderr, msg
print >> sys.stderr, "for help use --help"
sys.exit(2)
# default options
my_verbose = VERBOSE
# process options
for o, a in opts:
if o in ("-h","--help"):
print __doc__
sys.exit(0)
if o in ("--echo", ):
print a
if o in ("-v", "--verbose", ):
my_verbose = True
if my_verbose:
print >> sys.stderr, "ARGV is %s" % (argv)
w1 = Watdb(conf='test')
print w1
w2 = Watdb(conf='test')
print w2
w2.connect()
print w2.cxn
# print list(w2.maybeFetch('select id,code,tier from markets limit 10'))
import random
i = w2.maybeQuery("insert ignore into markets(code,tier) values('abc',%s)" % (random.randint(0,100)))
print i
print "fetchId"
j = w2.fetchId("markets", "tier", 100)
j = w2.fetchId("phones", "retained", "A'B")
print j
print "\nVerbose"
w2.verbose = True
j = w2.fetchId("phones", "retained", "A'B")
print "\nQuiet"
w2.verbose = False
j = w2.fetchId("phones", "retained", "A'B")
exit(0)
print "mqfi"
iq = "insert ignore into markets(code,tier) values('xyz',102)"
k = w2.mqfi(iq, "markets", "code", "xyz")
print k
# w3 = Watdb(conf='esc000__sigma', engine=False)
# print w3
# w3.connect()
# print w3.cxn
# print w3.maybeFetch('select source,market,city from posts limit 10')
print "updateFreq"
w2.updateFreq("phones", "phone", "3104488201")
w2.updateFreq("phones", "phone", "3104488201")
w2.updateFreq("phones", "phone", "3104488201")
print "insertionQuery"
import random
iq = w2.insertionQuery("phones", ["phone"], [str(random.randint(1111111111,9999999999))])
print iq
print "fetchQuery"
import random
fq = w2.fetchQuery("phones", "phone", str(random.randint(1111111111,9999999999)),
"code", "A'C")
print fq
# Script entry point: delegate to main() and propagate its exit status.
if __name__ == "__main__":
    sys.exit(main())
# End of watdb.py
| |
# Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create the dictionary for log functions related to network data flow.
Functions:
+ get_regex_list: Get the regular expressions and function list.
"""
from __future__ import absolute_import
import logparser.logs.network.network as network
def get_regex_list():
    """Return the regular expressions and functions list for this module.
    Each entry is a two-element list: a callback from
    logparser.logs.network.network (or, for the unregister cases, a
    callback factory applied to the entity kind) paired with the regex
    whose capture groups that callback consumes.
    """
    regex = []
    # Parser entity.
    regex.append([network.on_parse_packet,
                  r"MIGInterpreter_parse:(?:RTI0x\w+:)?(\w+) from " +
                  r"0X(\w+),0X(\w+)"])
    # Send and receive data from transport layer.
    regex.append([network.on_udpv4_send,
                  r"NDDS_Transport_UDPv4_send:(?:RTI0x\w+:)?\w+ sent (\d+) " +
                  r"bytes to 0X(\w+):(\d+)"])
    regex.append([network.on_udpv4_receive,
                  r"NDDS_Transport_UDPv4_receive_rEA:(?:RTI0x\w+:)?\w+ " +
                  r"received (\d+) bytes from 0X(\w+):(\d+)"])
    regex.append([network.on_shmem_send,
                  r"NDDS_Transport_Shmem_send:(?:RTI0x\w+:)?\w+ signalling " +
                  r"0X(\w+)"])
    regex.append([network.on_shmem_receive,
                  r"NDDS_Transport_Shmem_receive_rEA:(?:RTI0x\w+:)?\w+ " +
                  r"received (\d+) bytes"])
    # Errors from transport layer.
    regex.append([network.on_error_unreachable_network,
                  r"NDDS_Transport_UDPv4_send:OS sendmsg\(\) failure, " +
                  r"error 0X65: Network is unreachable"])
    regex.append([network.on_error_no_transport_available,
                  r"RTINetioSender_addDestination:no transport for " +
                  r"destination request (.+)"])
    # Messages from the participant entity
    regex.append([network.on_unregister_not_asserted_entity("Participant"),
                  r"DISCEndpointDiscoveryPlugin_unregisterParticipant" +
                  r"RemoteEndpoints:remote endpoint not previously asserted " +
                  r"by plugin: 0X(\w+),0X(\w+),0X(\w+),(\w+)"])
    regex.append([network.on_unregister_not_asserted_entity("DataWriter"),
                  r"DISCEndpointDiscoveryPlugin_unregisterRemoteWriter:" +
                  r"remote endpoint not previously asserted by plugin: " +
                  r"0X(\w+),0X(\w+),0X(\w+),0X(\w+)"])
    regex.append([network.on_unregister_not_asserted_entity("DataReader"),
                  r"DISCEndpointDiscoveryPlugin_unregisterRemoteReader:" +
                  r"remote endpoint not previously asserted by plugin: " +
                  r"0X(\w+),0X(\w+),0X(\w+),0X(\w+)"])
    regex.append([network.on_send_participant_announcement,
                  r"DISCSimpleParticipantDiscoveryPlugin_" +
                  r"remoteParticipantDiscovered:re-announcing participant " +
                  r"self: 0X(\w+),0X(\w+),0X(\w+),0X(\w+)"])
    # Messages from write entity.
    regex.append([network.on_schedule_data,
                  r"COMMENDSrWriterService_write:\s?writer oid 0x(\w+) " +
                  r"schedules job for sn \(([\d,]+)\)"])
    regex.append([network.on_send_data,
                  r"COMMENDSrWriterService_agentFunction:\s?writer " +
                  r"oid 0x(\w+) sends sn \(([\d,]+)\)"])
    regex.append([network.on_resend_data,
                  r"COMMENDSrWriterService_sendSyncRepairData:\[\d+,\d+\] " +
                  r"writer oid 0x(\w+) resends DATA to reader " +
                  r"\(0x(\w+),0x(\w+),0x(\w+),0x(\w+)\), sn " +
                  r"\[\(([\d,]+)\)\]"])
    regex.append([network.on_send_periodic_data,
                  r"COMMENDAnonWriterService_on(?:Domain)?BroadcastEvent:" +
                  r"writing periodic keyed data: SN=0x(\d+), " +
                  r"key=\(16\)(\w+), \d+ bytes"])
    regex.append([network.on_send_gap,
                  r"COMMENDSrWriterService_sendGapToLocator: writer oid " +
                  r"0x(\w+) sends GAP to reader " +
                  r"\(0x(\w+),0x(\w+),0x(\w+),0x(\w+)\) " +
                  r"for sn \[\(([\d,]+)\)-\(([\d,]+)\)\)"])
    regex.append([network.on_send_preemptive_gap,
                  r"COMMENDSrWriterService_onSubmessage:\[\d+,\d+\] " +
                  r"writer oid 0x(\w+) sends preemptive GAP to volatile " +
                  r"reader \(0x(\w+),0x(\w+),0x(\w+),0x(\w+)\)"])
    regex.append([network.on_send_preemptive_hb,
                  r"COMMENDSrWriterService_assertRemoteReader: " +
                  r"writer oid 0x(\w+) sends preemptive HB for sn " +
                  r"\(([\d,]+)\)-\(([\d,]+)\)"])
    regex.append([network.on_send_periodic_hb,
                  r"COMMENDSrWriterService_onSendHeartbeatEvent:\[\d+,\d+\] " +
                  r"writer oid 0x(\w+) sends periodic unicast HB for sn " +
                  r"\(([\d,]+)\)-\(([\d,]+)\), epoch\((\d+)\)"])
    regex.append([network.on_send_piggyback_hb,
                  r"COMMENDSrWriterService_agentFunction:\s?writer oid " +
                  r"0x(\w+) sends piggyback HB \(([\d,]+)\)-\(([\d,]+)\)"])
    regex.append([network.on_send_piggyback_hb_syncrepair,
                  r"COMMENDSrWriterService_sendSyncRepairData:\[\d+,\d+\] " +
                  r"writer oid 0x(\w+) sends piggyback HB for sn " +
                  r"\(([\d,]+)\)-\(([\d,]+), epoch\((\d+)\)\)"])
    regex.append([network.on_send_hb_response,
                  r"COMMENDSrWriterService_onSubmessage:\[\d+,\d+\] " +
                  r"writer oid 0x(\w+) sends response HB for sn " +
                  r"\(([\d,]+)\)-\(([\d,]+)\) epoch\((\d+)\)"])
    regex.append([network.on_receive_ack,
                  r"COMMENDSrWriterService_onSubmessage:\[\d+,\d+\] " +
                  r"writer oid 0x(\w+) receives ACKNACK from reader " +
                  r"0x([\w\.]+) for lead \[\(([\d,]+)\)\] bitcount\((\d+)\)," +
                  r" epoch\((\d+)\), isPureNack\((\d+)\)"])
    regex.append([network.on_instance_not_found,
                  r"WriterHistoryMemoryPlugin_addSample:instance not found"])
    regex.append([network.on_send_from_deleted_writer,
                  r"PRESPsWriter_writeInternal:" +
                  r"pres psWriter already destroyed"])
    regex.append([network.on_fail_serialize,
                  r"PRESWriterHistoryDriver_initializeSample:!serialize"])
    regex.append([network.on_drop_unregister_no_ack_instance,
                  r"WriterHistoryMemoryPlugin_dropFullyAcked" +
                  r"UnregisteredInstance:unregistered instances " +
                  r"not fully acked"])
    regex.append([network.on_writer_exceed_max_entries,
                  r"WriterHistoryMemoryPlugin_addEntryToInstance:" +
                  r"exceeded max entries"])
    regex.append([network.on_writer_batching_exceed_max_entries,
                  r"WriterHistoryMemoryPlugin_getBatchSampleGroupEntry:" +
                  r"exceeded max entries"])
    regex.append([network.on_batch_serialize_failure,
                  r"PRESPsWriter_writeBatchInternal:!error serializing " +
                  r"batch sample"])
    regex.append([network.on_ignore_ack,
                  r"COMMENDSrWriterService_onSubmessage:!ACK ignored: " +
                  r"number of active RR is > 1, but sum of RR at is 0"])
    # Messages from read entity.
    regex.append([network.on_receive_data,
                  r"COMMEND(Be|Sr)ReaderService_onSubmessage:" +
                  r"(?:\[\d+,\d+\])?\s?reader oid 0x(\w+) received (\w+) of " +
                  r"sn\(([\w,]+)\), vSn\(([\w,]+)\) from writer 0x([\w\.]+)"])
    regex.append([network.on_receive_fragment,
                  r"COMMENDSrReaderService_onSubmessage:\[\d+,\d+\] " +
                  r"reader oid 0x(\w+) received fragments (\d+)-(\d+) " +
                  r"for sn \(([\w,]+)\)"])
    regex.append([network.on_complete_fragment,
                  r"COMMENDSrReaderService_onSubmessage:\s+reader oid " +
                  r"0x(\w+) fully received sn \(([\d,]+)\)"])
    regex.append([network.on_receive_out_order_data,
                  r"COMMENDSrReaderService_onSubmessage:\[\d+,\d+\] reader " +
                  r"oid 0x(\w+) received (old|new) out-of-range DATA of sn " +
                  r"\(([\d,]+)\) from writer 0x([\w\.]+)"])
    regex.append([network.on_accept_data,
                  r"COMMENDSrReaderService_onSubmessage:\s+accepted " +
                  r"sn\(([\d,]+)\), dataRcvd\.lead\(([\d,]+)\), " +
                  r"nextRelSn\(([\d,]+)\), reservedCount\((\d+)\)"])
    regex.append([network.on_rejected_data,
                  r"COMMENDSrReaderService_onSubmessage:\s+rejected " +
                  r"sn\(([\d,]+)\), dataRcvd\.lead\(([\d,]+)\), " +
                  r"nextRelSn\(([\d,]+)\), reservedCount\((\d+)\)"])
    regex.append([network.on_receive_hb,
                  r"COMMENDSrReaderService_onSubmessage:\[\d+,\d+\] reader " +
                  r"oid 0x(\w+) received (HB|HB_BATCH|HB_SESSION) for " +
                  r"sn \(([\d,]+)\)-\(([\d,]+)\), epoch\((\d+)\) " +
                  r"from writer 0x([\w\.]+)"])
    regex.append([network.on_received_gap,
                  r"COMMENDSrReaderService_onSubmessage:\[\d+,\d+\] reader " +
                  r"oid 0x(\w+) received GAP for sn \(([\d,]+)\) to lead " +
                  r"\(([\d,]+)\) bit count (\d+) from writer " +
                  r"0x([\w\.]+)"])
    # Note: on_send_ack is registered twice, matching two distinct log lines
    # (onSubmessage vs onAckOnceEvent).
    regex.append([network.on_send_ack,
                  r"COMMENDSrReaderService_onSubmessage:\[\d+,\d+\] reader " +
                  r"oid 0x(\w+) sent ACK of bitmap lead\(([\d,]+)\), " +
                  r"bitcount\((\d+)\), epoch\((\d+)\) to writer 0x([\w\.]+)"])
    regex.append([network.on_send_ack,
                  r"COMMENDSrReaderService_onAckOnceEvent:\[\d+,\d+\] reader" +
                  r" oid 0x(\w+) sent ACK of bitmap lead\(([\d,]+)\), " +
                  r"bitcount\((\d+)\), epoch\((\d+)\) to writer ([\w\.]+)"])
    regex.append([network.on_send_nack,
                  r"COMMENDSrReaderService_sendAckNacks:\[\d+,\d+\] reader " +
                  r"oid 0x(\w+) sent NACK of bitmap lead\(([\d,]+)\), " +
                  r"bitcount\((\d+)\), epoch\((\d+)\) to writer 0x([\w\.]+)"])
    regex.append([network.on_send_nack_frag,
                  r"COMMENDSrReaderService_onSubmessage:\[\d+,\d+\] reader " +
                  r"oid 0x(\d+) sends NACK_FRAG for sn \(([\d,]+)\)"])
    regex.append([network.on_suppress_hb,
                  r"COMMENDSrReaderService_onSubmessage:\s+reader oid " +
                  r"0x(\w+) suppressed HEARTBEAT"])
    regex.append([network.on_reader_exceed_max_entries,
                  r"PRESCstReaderCollator_addEntryToInstance:" +
                  r"exceeded max entriesPerInstance"])
    regex.append([network.on_write_max_blocking_time_expired,
                  r"PRESPsWriter_writeInternal:max blocking time expired"])
    regex.append([network.on_sample_received_from_deleted_writer,
                  r"COMMENDBeReaderService_onSubmessage:" +
                  r"!get ber remoteWriter"])
    regex.append([network.on_deserialize_failure,
                  r"PRES(PsReaderQueue|CstReaderCollator)_storeSampleData:" +
                  r"(?:RTI0x\w+:)?!deserialize"])
    regex.append([network.on_shmem_queue_full,
                  r"NDDS_Transport_Shmem_send:failed to add data. " +
                  r"shmem queue for port 0x(\w+) is full " +
                  r"\(received_message_count_max=(\d+), " +
                  r"receive_buffer_size=(\d+)\). Try to increase queue " +
                  r"resource limits\."])
    return regex
| |
import os
import sys
import datetime
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from wp_frontman.blog import Blog
class Command(BaseCommand):
    """Management command that generates one Django URLconf module per
    WP Frontman blog from the blog's WordPress options.

    Usage: ./manage.py <subcommand> [options] [blog_id ...]
    """

    # Old-style (optparse) option declarations, pre-Django-1.8 convention.
    option_list = BaseCommand.option_list + (
        make_option(
            "--strip-blog-from-default", action="store_true", dest="strip_blog", default=False,
            help="strip the 'blog' prefix WP adds to post permalinks in the default blog"
        ),
        make_option(
            "--prepend-main-rules", action="store_true", dest="prepend_main", default=False,
            help="prepend urlrules defined in settings.py to blog urls"
        ),
        make_option(
            "--append-main-rules", action="store_true", dest="append_main", default=False,
            help="append urlrules defined in settings.py to blog urls"
        ),
        make_option(
            "--dir", type="str", dest="dir", default=None,
            help="where to write the generated files, defaults to 'wpf_blogs' in the toplevel Django app"
        ),
        make_option(
            "--force", action="store_true", dest="force", default=False,
            help="overwrite urlrules files if already present, create destination folder if missing"
        ),
        make_option(
            "--check-feed-redirect", action="store_true", dest="feed_redirect", default=False,
            help="use settings from the wpf_feedburner plugin to check for feed redirection"
        ),
        make_option(
            "--search-base", type="str", dest="search_base", default=None,
            help="override the search base (path prefix for search pages), defaults to WP's default of 'search'"
        ),
        make_option(
            "--links-base", type="str", dest="links_base", default='links',
            help="base path to use for link categories, defaults to 'links'"
        ),
        make_option(
            "--search-view", type="str", dest="search_view", default='wp_frontman.views.search',
            help="override the search view, defaults to wp_frontman.views.search"
        ),
        make_option(
            "--default-feed-url", type="str", dest="default_feed", default=None,
            help="default feed url for reversing urlrules"
        ),
        make_option(
            "--additional-feeds", type="str", dest="additional_feeds", default=None,
            help="feed paths separated by commas used to set up additional url rules redirecting to the main feed"
        ),
    )
    # Usage hook (optparse era): receives the subcommand name.
    usage = lambda s, sc: "Usage: ./manage.py %s [options] [blog_id]" % sc
    help = "Creates Django urlrules from WP options for WPFrontman blogs."
    # This command only reads WP options and writes files; skip model validation.
    requires_model_validation = False
    def handle(self, *args, **opts):
        """Generate a ``urls_<blog_id>.py`` module for each selected blog.

        Positional args, when given, are integer blog ids restricting
        which blogs are processed. Raises CommandError on conflicting
        options, bad ids, missing destination dir, or invalid output.
        """
        if opts['prepend_main'] and opts['append_main']:
            raise CommandError("Please specify only one of --prepend-main-rules and --append-main-rules.")
        root_urlconf = getattr(settings, 'ROOT_URLCONF')
        # Resolve the destination directory; the default lives next to the
        # settings module of the active DJANGO_SETTINGS_MODULE.
        d = opts['dir']
        if d is None:
            d = os.path.join(
                os.path.dirname(os.path.abspath(sys.modules[os.environ['DJANGO_SETTINGS_MODULE']].__file__)),
                'wpf_blogs'
            )
        else:
            d = os.path.abspath(d)
        if not os.path.isdir(d):
            if opts['force']:
                print "Creating destination folder '%s'" % d
                try:
                    os.mkdir(d)
                except Exception, e:
                    raise CommandError(e)
            else:
                raise CommandError("No such directory %s" % d)
        # The destination must be a package so generated modules are importable.
        fname = os.path.join(d, '__init__.py')
        if not os.path.isfile(fname):
            if opts['force']:
                print "Creating missing '__init__.py' file in destination folder"
                try:
                    file(fname, 'w').write("# created by WP Frontman on %s\n" % datetime.datetime.now().isoformat())
                except Exception, e:
                    raise CommandError(e)
            else:
                print >>sys.stderr, "File '__init__.py' missing in destination folder, remember to create one"
        blogs = Blog.site.blogs
        print "%s blogs found" % len(blogs)
        if args:
            # Positional arguments select a subset of blog ids.
            try:
                args = [int(a) for a in args]
            except (TypeError, ValueError):
                raise CommandError("Blog ids passed as arguments must be integers.")
            blogs = [b for b in blogs if b in args]
            print "%s blog%s kept" % (len(blogs), '' if len(blogs) == 1 else 's')
        print
        for b in blogs:
            blog = Blog.factory(b)
            fname = os.path.join(d, 'urls_%s.py' % blog.blog_id)
            # Never clobber an existing urlconf unless --force was given.
            if os.path.isfile(fname) and not opts['force']:
                print "Skipping blog id '%s', urlrules file already present." % blog.blog_id
                continue
            print "processing blog id %s (%s)" % (blog.blog_id, blog.blogname)
            rules, rules_dict = self.create_rules(
                blog, opts['strip_blog'], opts['feed_redirect'], opts['default_feed'],
                opts['search_base'], opts['search_view'], opts['additional_feeds'],
                opts['links_base']
            )
            # Assemble the generated module source line by line.
            buffer = ["'''WP Frontman urlrules for blog '%s', generated on %s'''\n" % (blog.blog_id, datetime.datetime.now().isoformat())]
            buffer.append("from django.conf.urls.defaults import *\n")
            if opts['prepend_main'] or opts['append_main']:
                buffer.append("from %s import urlpatterns as main_patterns\n" % root_urlconf)
            buffer.append("")
            buffer.append("urlpatterns = patterns('',")
            # The rule templates are %-interpolated here with the per-blog dict.
            buffer.append("    " + ",\n    ".join(rules) % rules_dict)
            buffer.append(")\n\n")
            if opts['prepend_main']:
                buffer.append("urlpatterns = main_patterns + urlpatterns\n")
            elif opts['append_main']:
                buffer.append("urlpatterns += main_patterns\n")
            buffer = "\n".join(buffer)
            # Sanity-check the generated module by executing it before writing
            # (exec of our own generated source, not untrusted input).
            try:
                exec buffer in dict()
            except Exception, e:
                raise CommandError("Error parsing generated URL rules: %s" % e)
            file(fname, 'w').write(buffer)
            print "URL rules stored in '%s'." % fname
            print
    def create_rules(
        self, blog, strip_default=False, feed_redirect=False, default_feed=None,
        search_base=None, search_view=None, additional_feeds=None,
        links_base='links'
    ):
        """Build the url() rule templates for *blog*.

        Returns (rules, rules_dict): *rules* is a list of ``url(...)``
        source-code strings containing ``%(...)s`` placeholders, and
        *rules_dict* supplies the per-blog values (path, permalink
        structure, taxonomy base paths) used to interpolate them.
        """
        ps = blog.permalink_ps
        # Optionally drop the "blog/" prefix WP forces onto the default blog
        # ('blog\\/' is 6 characters of escaped regex source).
        if strip_default and blog.is_default and ps.startswith('blog\\/'):
            ps = ps[6:]
        append_slash = '/' if getattr(settings, 'APPEND_SLASH', False) else '/?'
        path = blog.urlrule_path
        rules = list()
        ### home ###
        rules.append("url('^%(path)s$', 'wp_frontman.views.index', name='wpf_index')")
        rules.append("url('^%(path)spage/(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.index', name='wpf_index')")
        rules.append("# url('^%(path)spage(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.index')")
        ### posts ###
        # files
        rules.append("url('^files/(?P<filepath>.*?)$', 'wp_frontman.views.media', name='wpf_media')")
        # regular post
        rules.append("url('^%(path)s%(ps)s%(append_slash)s$', 'wp_frontman.views.post', name='wpf_post')")
        ### archives, month ###
        rules.append("url('^%(path)s(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})%(append_slash)s$', 'wp_frontman.views.archives', name='wpf_archives')")
        rules.append("url('^%(path)s(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/page/(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.archives', name='wpf_archives')")
        rules.append("url('^%(path)s(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/page(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.archives')")
        ### archives, year ###
        rules.append("url('^%(path)s(?P<year>[0-9]{4})%(append_slash)s$', 'wp_frontman.views.archives', name='wpf_archives')")
        rules.append("url('^%(path)s(?P<year>[0-9]{4})/page/?(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.archives', name='wpf_archives')")
        ### author page ###
        rules.append("url('^%(path)s%(author_base)s/(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.author', name='wpf_author')")
        rules.append("url('^%(path)s%(author_base)s/(?P<slug>[^/]+)/page/?(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.author', name='wpf_author')")
        ### category ###
        rules.append("url('^%(path)s%(category_base)s/(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.category', name='wpf_category')")
        rules.append("url('^%(path)s%(category_base)s/(?P<slug>[^/]+)/page/?(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.category', name='wpf_category')")
        rules.append("#url('^%(path)s%(category_base)s/(?P<slug>[^/]+)/page(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.category')")
        # Hierarchical category paths only when categories act as sets.
        if blog.site.categories_as_sets:
            rules.append("url('^%(path)s%(category_base)s/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)/page/(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.category', name='wpf_category')")
            rules.append("#url('^%(path)s%(category_base)s/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)/page(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.category')")
            rules.append("url('^%(path)s%(category_base)s/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.category', name='wpf_category')")
        ### links ###
        rules.append("url('^%(path)s%(links_base)s/(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.links', name='wpf_link_category')")
        rules.append("url('^%(path)s%(links_base)s/(?P<slug>[^/]+)/page/?(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.links', name='wpf_link_category')")
        rules.append("#url('^%(path)s%(links_base)s/(?P<slug>[^/]+)/page(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.links')")
        rules.append("url('^%(path)s%(links_base)s/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)/page/(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.links', name='wpf_link_category')")
        rules.append("#url('^%(path)s%(links_base)s/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)/page(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.links')")
        rules.append("url('^%(path)s%(links_base)s/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.links', name='wpf_link_category')")
        ### search ###
        # The search view is configurable, so it is concatenated into the
        # template instead of being part of the literal.
        #url('^ricerca/(?P<q>.+)/(?P<month>[0-9]{6})/page/(?P<page>[0-9]+)/$', 'wpf_sphinx.views.search', name='wpf_sphinx'),
        #url('^ricerca/(?P<q>.+)/page/(?P<page>[0-9]+)/$', 'wpf_sphinx.views.search', name='wpf_sphinx'),
        #url('^ricerca/(?P<q>.+)/(?P<month>[0-9]{6})/$', 'wpf_sphinx.views.search', name='wpf_sphinx'),
        #url('^ricerca/(?P<q>.+)/$', 'wpf_sphinx.views.search', name='wpf_sphinx'),
        #url('^ricerca/$', 'wpf_sphinx.views.search', name='wpf_sphinx'),
        rules.append("url('^%(path)s%(search_base)s/(?P<q>.+)/(?P<month>[0-9]{6})/page/(?P<page>[0-9]+)%(append_slash)s$', '" + search_view + "', name='wpf_search')")
        rules.append("url('^%(path)s%(search_base)s/(?P<q>.+)/page/?(?P<page>[0-9]+)%(append_slash)s$', '" + search_view + "', name='wpf_search')")
        rules.append("url('^%(path)s%(search_base)s/(?P<q>.+)/(?P<month>[0-9]{6})%(append_slash)s$', '" + search_view + "', name='wpf_search')")
        rules.append("url('^%(path)s%(search_base)s%(append_slash)s$', '" + search_view + "', name='wpf_search')")
        rules.append("url('^%(path)s%(search_base)s/(?P<q>.+)%(append_slash)s$', '" + search_view + "', name='wpf_search')")
        ### tag ###
        rules.append("url('^%(path)s%(tag_base)s/(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.tag', name='wpf_post_tag')")
        rules.append("url('^%(path)s%(tag_base)s/(?P<slug>[^/]+)/page/?(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.tag', name='wpf_post_tag')")
        ### pages ###
        # we use a custom middleware class for pages, we might want to add a rule at the bottom anyway so that we can use reverse though
        ### feeds ###
        # TODO: check comments feed urls, etc. // feed_check_redirect
        if feed_redirect:
            # Feeds go through feed_check_redirect (wpf_feedburner support).
            if default_feed:
                rules.append("url('^%(path)s" + default_feed + "$', 'wp_frontman.views.feed_check_redirect', name='wpf_feed')")
                rules.append("url('^%(path)sfeed%(append_slash)s$', 'wp_frontman.views.feed_check_redirect')")
            else:
                rules.append("url('^%(path)sfeed%(append_slash)s$', 'wp_frontman.views.feed_check_redirect', name='wpf_feed')")
            if additional_feeds:
                for f in additional_feeds.split(','):
                    rules.append("url('^%(path)s" + f + "$', 'wp_frontman.views.feed_check_redirect')")
            rules.append("url('^%(path)s(?:feed/)?(?:feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_check_redirect')")
            rules.append("url('^%(path)swp-(?:atom|feed|rdf|rss|rss2)\.php$', 'wp_frontman.views.feed_check_redirect')")
            rules.append("url('^%(path)sfeed_for_feedburner.xml$', 'wp_frontman.views.feed')")
            rules.append("url('^%(path)scomments/feed%(append_slash)s$', 'wp_frontman.views.feed_check_redirect', dict(feed_type='comments'), name='wpf_feed_comments')")
            rules.append("url('^%(path)scomments/(?:feed/)?(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_check_redirect', dict(feed_type='comments'))")
            rules.append("url('^%(path)swp-commentsrss2.php$', 'wp_frontman.views.feed_check_redirect', dict(feed_type='comments'))")
            rules.append("url('^%(path)scomment_feed_for_feedburner.xml$', 'wp_frontman.views.feed_comments')")
        else:
            # Plain feeds served directly by wp_frontman views.
            if default_feed:
                rules.append("url('^%(path)s" + default_feed + "$', 'wp_frontman.views.feed', name='wpf_feed')")
                rules.append("url('^%(path)sfeed%(append_slash)s$', 'wp_frontman.views.feed')")
            else:
                rules.append("url('^%(path)sfeed%(append_slash)s$', 'wp_frontman.views.feed', name='wpf_feed')")
            rules.append("url('^%(path)s(?:feed/)?(?:feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed')")
            rules.append("url('^%(path)swp-(?:atom|feed|rdf|rss|rss2)\.php$', 'wp_frontman.views.feed')")
            rules.append("url('^%(path)scomments/feed%(append_slash)s$', 'wp_frontman.views.feed_comments', name='wpf_feed_comments')")
            rules.append("url('^%(path)scomments/(?:feed/)?(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_comments')")
            rules.append("url('^%(path)swp-commentsrss2.php$', 'wp_frontman.views.feed_comments')")
        rules.append("url('^%(path)s%(author_base)s/(?P<nicename>[^/]+)/feed%(append_slash)s$', 'wp_frontman.views.feed_author', name='wpf_feed_author')")
        rules.append("url('^%(path)s%(author_base)s/(?P<nicename>[^/]+)(?:/feed)?/(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_author')")
        rules.append("url('^%(path)s%(ps)s/feed%(append_slash)s$', 'wp_frontman.views.feed_post', name='wpf_feed_post')")
        rules.append("url('^%(path)s%(ps)s/(?:feed/)?(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_post')")
        # trackback
        rules.append("url('^%(path)s%(ps)s/trackback%(append_slash)s$', 'wp_frontman.views.trackback', name='wpf_trackback')")
        # paged post
        rules.append("url('^%(path)s%(ps)s/page/(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.post', name='wpf_post')")
        # paged comments
        if getattr(blog, 'page_comments', False):
            rules.append("url('^%(path)s%(ps)s/comment-page-(?P<comment_page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.post', name='wpf_post_comments')")
        ### user registration, activation and login ###
        rules.append("# change the following according to your needs")
        rules.append("url('^users/registration%(append_slash)s$', 'wp_frontman.views.user_registration', name='wpf_user_registration')")
        rules.append("url('^users/welcome%(append_slash)s$', 'wp_frontman.views.user_registration_message', name='wpf_user_registration_message')")
        rules.append("url('^users/activation%(append_slash)s$', 'wp_frontman.views.user_activation', name='wpf_user_activation')")
        rules.append("url('^users/login%(append_slash)s$', 'wp_frontman.views.user_login', name='wpf_user_login')")
        rules.append("url('^users/logout%(append_slash)s$', 'wp_frontman.views.user_logout', name='wpf_user_logout')")
        rules.append("url('^users/profile%(append_slash)s$', 'wp_frontman.views.user_profile', name='wpf_user_profile')")
        rules.append("# dummy rules to have reverse url mapping work when using wp for users stuff")
        rules.append("#url('^wp-signup.php$', 'wp_frontman.views.user_registration', name='wpf_user_registration')")
        rules.append("#url('^wp-activate.php$', 'wp_frontman.views.user_activation', name='wpf_user_activation')")
        rules.append("#url('^wp-login.php$', 'wp_frontman.views.user_login', name='wpf_user_login')")
        rules.append("#url('^wp-login.php\?action=logout$', 'wp_frontman.views.user_login', name='wpf_user_login')")
        rules.append("#url('^%(path)s%(category_base)s/(?P<slug>[^/]+)(?:/feed)?/(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_category', name='wpf_feed_category')")
        rules.append("#url('^%(path)s%(search_base)s/(?P<q>.+)(?:/feed)?/(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_search', name='wpf_feed_search')")
        rules.append("#url('^%(path)s%(tag_base)s/(?P<slug>[^/]+)(?:/feed)?/(?P<feed_type>feed|rdf|rss|rss2|atom)%(append_slash)s$', 'wp_frontman.views.feed_tag', name='wpf_feed_tag')")
        ### custom taxonomies ###
        custom_taxonomies = blog.options.get('wp_frontman', dict()).get('custom_taxonomies', dict())
        if custom_taxonomies.get('enabled'):
            for k, v in custom_taxonomies.get('custom_taxonomies', dict()).items():
                slug = v.get('rewrite_slug')
                if slug:
                    rules.append("# '%s' custom taxonomy" % k)
                    rules.append("url('^%(path)s" + slug + "/(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.taxonomy', dict(taxonomy='" + k + "'), name='wpf_" + k + "')")
                    rules.append("url('^%(path)s" + slug + "/(?P<slug>[^/]+)/page/?(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.taxonomy', dict(taxonomy='" + k + "'), name='wpf_" + k + "')")
                    if v['rewrite_hierarchical']:
                        rules.append("url('^%(path)s" + slug + "/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)/page/(?P<page>[0-9]+)%(append_slash)s$', 'wp_frontman.views.taxonomy', dict(taxonomy='" + k + "'), name='wpf_" + k + "')")
                        rules.append("url('^%(path)s" + slug + "/(?P<parents>(?:[^/]+/)+)(?P<slug>[^/]+)%(append_slash)s$', 'wp_frontman.views.taxonomy', dict(taxonomy='" + k + "'), name='wpf_" + k + "')")
        ### add attachment urls here if we need them ###
        rules_dict = dict(append_slash=append_slash, ps=ps, path=path)
        # Resolve the *_base placeholders: an explicit keyword argument
        # (found via locals(): search_base, links_base, ...) wins over the
        # blog attribute of the same name, with the key itself as fallback.
        # NOTE: this locals() lookup depends on the parameter names above --
        # do not rename them.
        for k in ('author', 'category', 'links', 'search', 'tag'):
            v = locals().get('%s_base' % k) or getattr(blog, '%s_base' % k, k)
            if v is None or not v.strip():
                v = k
            if v.startswith('/'):
                v = v[1:]
            rules_dict['%s_base' % k] = v
        return rules, rules_dict
| |
from __future__ import absolute_import
import Cookie
import copy
import threading
import time
import urllib
import urlparse
from email.utils import parsedate_tz, formatdate, mktime_tz
import netlib
from netlib import http, tcp, odict, utils, encoding
from netlib.http import cookies, semantics, http1
from .tcp import TCPHandler
from .primitives import KILL, ProtocolHandler, Flow, Error
from ..proxy.connection import ServerConnection
from .. import utils, controller, stateobject, proxy
class decoded(object):
    """
    Context manager that decodes a request or response body on entry and
    re-encodes it with the original Content-Encoding on exit.

    Example:
        with decoded(request):
            request.content = request.content.replace("foo", "bar")
    """

    def __init__(self, o):
        self.o = o
        ce = o.headers.get_first("content-encoding")
        # Remember the encoding only if we actually know how to undo it.
        self.ce = ce if ce in encoding.ENCODINGS else None

    def __enter__(self):
        if self.ce:
            self.o.decode()

    def __exit__(self, type, value, tb):
        if self.ce:
            self.o.encode(self.ce)
class MessageMixin(stateobject.StateObject):
    """
    Behaviour shared by HTTP requests and responses: state serialization
    and content (de)coding based on the Content-Encoding header.
    """

    _stateobject_attributes = dict(
        httpversion=tuple,
        headers=odict.ODictCaseless,
        body=str,
        timestamp_start=float,
        timestamp_end=float
    )
    # "body" can be huge; in short state dumps it is summarized instead.
    _stateobject_long_attributes = {"body"}

    def get_state(self, short=False):
        """
        Return the serialized message state; in short form the body is
        replaced by a "contentLength" summary.
        """
        ret = super(MessageMixin, self).get_state(short)
        if short:
            if self.body:
                ret["contentLength"] = len(self.body)
            elif self.body == CONTENT_MISSING:
                # NOTE(review): CONTENT_MISSING is not imported in this chunk;
                # presumably provided elsewhere in the module -- verify.
                ret["contentLength"] = None
            else:
                ret["contentLength"] = 0
        return ret

    def get_decoded_content(self):
        """
        Returns the decoded content based on the current Content-Encoding
        header.
        Doesn't change the message itself or its headers.
        """
        ce = self.headers.get_first("content-encoding")
        if not self.body or ce not in encoding.ENCODINGS:
            return self.body
        return encoding.decode(ce, self.body)

    def decode(self):
        """
        Decodes body based on the current Content-Encoding header, then
        removes the header. If there is no Content-Encoding header, no
        action is taken.
        Returns True if decoding succeeded, False otherwise.
        """
        ce = self.headers.get_first("content-encoding")
        if not self.body or ce not in encoding.ENCODINGS:
            return False
        data = encoding.decode(ce, self.body)
        if data is None:
            # Decoder failed (corrupt data); leave the message untouched.
            return False
        self.body = data
        del self.headers["content-encoding"]
        return True

    def encode(self, e):
        """
        Encodes body with the encoding e, where e is "gzip", "deflate"
        or "identity".
        """
        # FIXME: Error if there's an existing encoding header?
        self.body = encoding.encode(e, self.body)
        self.headers["content-encoding"] = [e]

    def copy(self):
        """Return a shallow copy with independently copied headers."""
        c = copy.copy(self)
        c.headers = self.headers.copy()
        return c

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in both the headers
        and the body of the message. Encoded body will be decoded
        before replacement, and re-encoded afterwards.
        Returns the number of replacements made.
        """
        with decoded(self):
            self.body, c = utils.safe_subn(
                pattern, repl, self.body, *args, **kwargs
            )
        c += self.headers.replace(pattern, repl, *args, **kwargs)
        return c
class HTTPRequest(MessageMixin, semantics.Request):
    """
    An HTTP request.

    Exposes the following attributes:

        method: HTTP method

        scheme: URL scheme (http/https)

        host: Target hostname of the request. This is not necessarily the
        direct upstream server (which could be another proxy), but it's always
        the target server we want to reach at the end. This attribute is either
        inferred from the request itself (absolute-form, authority-form) or from
        the connection metadata (e.g. the host in reverse proxy mode).

        port: Destination port

        path: Path portion of the URL (not present in authority-form)

        httpversion: HTTP version tuple, e.g. (1,1)

        headers: odict.ODictCaseless object

        content: Content of the request, None, or CONTENT_MISSING if there
        is content associated, but not present. CONTENT_MISSING evaluates
        to False to make checking for the presence of content natural.

        form_in: The request form which mitmproxy has received. The following
        values are possible:
             - relative (GET /index.html, OPTIONS *) (covers origin form and
               asterisk form)
             - absolute (GET http://example.com:80/index.html)
             - authority-form (CONNECT example.com:443)
             Details: http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-25#section-5.3

        form_out: The request form which mitmproxy will send out to the
        destination

        timestamp_start: Timestamp indicating when request transmission started

        timestamp_end: Timestamp indicating when request transmission ended
    """

    def __init__(
        self,
        form_in,
        method,
        scheme,
        host,
        port,
        path,
        httpversion,
        headers,
        body,
        timestamp_start=None,
        timestamp_end=None,
        form_out=None,
    ):
        semantics.Request.__init__(
            self,
            form_in,
            method,
            scheme,
            host,
            port,
            path,
            httpversion,
            headers,
            body,
            timestamp_start,
            timestamp_end,
        )
        # Outgoing form defaults to whatever form we received.
        self.form_out = form_out or form_in
        # Have this request's cookies been modified by sticky cookies or auth?
        self.stickycookie = False
        self.stickyauth = False
        # Is this request replayed?
        self.is_replay = False

    _stateobject_attributes = MessageMixin._stateobject_attributes.copy()
    _stateobject_attributes.update(
        form_in=str,
        method=str,
        scheme=str,
        host=str,
        port=int,
        path=str,
        form_out=str,
        is_replay=bool
    )

    @classmethod
    def from_state(cls, state):
        """Alternate constructor: rebuild a request from serialized state."""
        f = cls(
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None)
        f.load_state(state)
        return f

    @classmethod
    def from_protocol(
        self,
        protocol,
        include_body=True,
        body_size_limit=None,
    ):
        """Read a request from *protocol* and wrap it as an HTTPRequest.

        NOTE(review): first parameter is conventionally named ``cls`` for a
        classmethod; named ``self`` here -- works, but confusing.
        """
        req = protocol.read_request(
            include_body = include_body,
            body_size_limit = body_size_limit,
        )
        return HTTPRequest(
            req.form_in,
            req.method,
            req.scheme,
            req.host,
            req.port,
            req.path,
            req.httpversion,
            req.headers,
            req.body,
            req.timestamp_start,
            req.timestamp_end,
        )

    @classmethod
    def wrap(self, request):
        """Wrap an existing semantics.Request-like object as an HTTPRequest."""
        return HTTPRequest(
            form_in=request.form_in,
            method=request.method,
            scheme=request.scheme,
            host=request.host,
            port=request.port,
            path=request.path,
            httpversion=request.httpversion,
            headers=request.headers,
            body=request.body,
            timestamp_start=request.timestamp_start,
            timestamp_end=request.timestamp_end,
            # Plain requests may not carry a form_out yet.
            form_out=(request.form_out if hasattr(request, 'form_out') else None),
        )

    def __hash__(self):
        # Identity hash: requests are mutable, so hash by object identity.
        return id(self)

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in the headers, the
        request path and the body of the request. Encoded content will be
        decoded before replacement, and re-encoded afterwards.
        Returns the number of replacements made.
        """
        c = MessageMixin.replace(self, pattern, repl, *args, **kwargs)
        self.path, pc = utils.safe_subn(
            pattern, repl, self.path, *args, **kwargs
        )
        c += pc
        return c
class HTTPResponse(MessageMixin, semantics.Response):
    """
    An HTTP response.

    Exposes the following attributes:

        httpversion: HTTP version tuple, e.g. (1, 0), (1, 1), or (2, 0)

        status_code: HTTP response status code

        msg: HTTP response message

        headers: ODict Caseless object

        content: Content of the request, None, or CONTENT_MISSING if there
        is content associated, but not present. CONTENT_MISSING evaluates
        to False to make checking for the presence of content natural.

        timestamp_start: Timestamp indicating when request transmission started

        timestamp_end: Timestamp indicating when request transmission ended
    """

    def __init__(
        self,
        httpversion,
        status_code,
        msg,
        headers,
        body,
        timestamp_start=None,
        timestamp_end=None,
    ):
        semantics.Response.__init__(
            self,
            httpversion,
            status_code,
            msg,
            headers,
            body,
            timestamp_start=timestamp_start,
            timestamp_end=timestamp_end,
        )
        # Is this request replayed?
        self.is_replay = False
        # Whether the response body is streamed to the client instead of buffered.
        self.stream = False

    _stateobject_attributes = MessageMixin._stateobject_attributes.copy()
    _stateobject_attributes.update(
        status_code=int,
        msg=str
    )

    @classmethod
    def from_state(cls, state):
        """Alternate constructor: rebuild a response from serialized state."""
        f = cls(None, None, None, None, None)
        f.load_state(state)
        return f

    @classmethod
    def from_protocol(
        self,
        protocol,
        request_method,
        include_body=True,
        body_size_limit=None
    ):
        """Read a response from *protocol* and wrap it as an HTTPResponse.

        NOTE(review): first parameter is conventionally named ``cls`` for a
        classmethod; named ``self`` here -- works, but confusing.
        """
        resp = protocol.read_response(
            request_method,
            body_size_limit,
            include_body=include_body
        )
        return HTTPResponse(
            resp.httpversion,
            resp.status_code,
            resp.msg,
            resp.headers,
            resp.body,
            resp.timestamp_start,
            resp.timestamp_end,
        )

    @classmethod
    def wrap(self, response):
        """Wrap an existing semantics.Response-like object as an HTTPResponse."""
        return HTTPResponse(
            httpversion=response.httpversion,
            status_code=response.status_code,
            msg=response.msg,
            headers=response.headers,
            body=response.body,
            timestamp_start=response.timestamp_start,
            timestamp_end=response.timestamp_end,
        )

    def _refresh_cookie(self, c, delta):
        """
        Takes a cookie string c and a time delta in seconds, and returns
        a refreshed cookie string.
        """
        c = Cookie.SimpleCookie(str(c))
        for i in c.values():
            if "expires" in i:
                d = parsedate_tz(i["expires"])
                if d:
                    d = mktime_tz(d) + delta
                    i["expires"] = formatdate(d)
                else:
                    # This can happen when the expires tag is invalid.
                    # reddit.com sends a an expires tag like this: "Thu, 31 Dec
                    # 2037 23:59:59 GMT", which is valid RFC 1123, but not
                    # strictly correct according to the cookie spec. Browsers
                    # appear to parse this tolerantly - maybe we should too.
                    # For now, we just ignore this.
                    del i["expires"]
        return c.output(header="").strip()

    def refresh(self, now=None):
        """
        This fairly complex and heuristic function refreshes a server
        response for replay.
        - It adjusts date, expires and last-modified headers.
        - It adjusts cookie expiration.
        """
        if not now:
            now = time.time()
        delta = now - self.timestamp_start
        refresh_headers = [
            "date",
            "expires",
            "last-modified",
        ]
        for i in refresh_headers:
            if i in self.headers:
                d = parsedate_tz(self.headers[i][0])
                if d:
                    new = mktime_tz(d) + delta
                    self.headers[i] = [formatdate(new)]
        c = []
        # NOTE(review): assumes ODict __getitem__ yields an empty list when
        # "set-cookie" is absent -- verify against the odict implementation.
        for i in self.headers["set-cookie"]:
            c.append(self._refresh_cookie(i, delta))
        if c:
            self.headers["set-cookie"] = c
| |
from StringIO import StringIO
from datetime import datetime
from zeit.cms.checkout.helper import checked_out
from zeit.cms.interfaces import ICMSContent
from zeit.cms.related.interfaces import IRelatedContent
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
from zeit.cms.workflow.interfaces import IPublishInfo, IPublish
import gocept.testing.mock
import logging
import mock
import os
import pytz
import shutil
import time
import transaction
import zeit.cms.related.interfaces
import zeit.cms.testing
import zeit.objectlog.interfaces
import zeit.workflow.publish
import zeit.workflow.testing
import zope.app.appsetup.product
import zope.component
import zope.i18n
class PublishTest(zeit.cms.testing.FunctionalTestCase):
    """Functional tests for basic publish behaviour."""

    layer = zeit.workflow.testing.LAYER

    def test_object_already_checked_out_should_raise(self):
        article = ICMSContent('http://xml.zeit.de/online/2007/01/Somalia')
        IPublishInfo(article).urgent = True
        # Lock the object by checking it out as the current user ...
        zeit.cms.checkout.interfaces.ICheckoutManager(article).checkout()
        zope.security.management.endInteraction()
        # ... then publishing as a different user must fail on the lock.
        # (async= is a reserved keyword from Python 3.7 on; this is Python 2.)
        with zeit.cms.testing.interaction('zope.producer'):
            with self.assertRaises(Exception) as info:
                IPublish(article).publish(async=False)
            self.assertIn('LockingError', str(info.exception))
        self.assertEqual(False, IPublishInfo(article).published)
class FakePublishTask(zeit.workflow.publish.PublishRetractTask):
    """Task double that records the objects it was asked to process
    instead of doing any real publishing work."""

    def __init__(self):
        self.test_log = list()

    def _run(self, obj):
        # Simulate a slow task, then remember what we were run for.
        time.sleep(0.1)
        self.test_log.append(obj)

    @property
    def jobid(self):
        # Fakes never get a real job id.
        return None
class RelatedDependency(object):
    """Adapter that declares an object's related content as its
    publication dependencies."""

    zope.component.adapts(zeit.cms.interfaces.ICMSContent)
    zope.interface.implements(
        zeit.workflow.interfaces.IPublicationDependencies)

    def __init__(self, context):
        self.context = context

    def get_dependencies(self):
        # Whatever is configured as "related" must be published along.
        return zeit.cms.related.interfaces.IRelatedContent(self.context).related
class PublicationDependencies(zeit.cms.testing.FunctionalTestCase):
    """Publishing an object also publishes its related dependencies,
    bounded by the configured dependency-publish-limit."""

    layer = zeit.workflow.testing.LAYER

    def setUp(self):
        super(PublicationDependencies, self).setUp()
        self.patches = gocept.testing.mock.Patches()
        self.populate_repository_with_dummy_content()
        self.setup_dates_so_content_is_publishable()
        # Allow at most 2 dependent objects to be published along.
        self.patches.add_dict(
            zope.app.appsetup.product.getProductConfiguration('zeit.workflow'),
            {'dependency-publish-limit': 2})
        zope.component.getSiteManager().registerAdapter(
            RelatedDependency, name='related')

    def tearDown(self):
        self.patches.reset()
        zope.component.getSiteManager().unregisterAdapter(
            RelatedDependency, name='related')
        super(PublicationDependencies, self).tearDown()

    def populate_repository_with_dummy_content(self):
        # Three example objects that will serve as related content.
        self.related = []
        for i in range(3):
            item = ExampleContentType()
            self.repository['t%s' % i] = item
            self.related.append(self.repository['t%s' % i])

    def setup_dates_so_content_is_publishable(self):
        DAY1 = datetime(2010, 1, 1, tzinfo=pytz.UTC)
        DAY2 = datetime(2010, 2, 1, tzinfo=pytz.UTC)
        DAY3 = datetime(2010, 3, 1, tzinfo=pytz.UTC)
        # XXX it would be nicer to patch this just for the items in question,
        # but we lack the mechanics to easily substitute adapter instances
        sem = self.patches.add('zeit.cms.content.interfaces.ISemanticChange')
        sem().last_semantic_change = DAY1
        sem().has_semantic_change = False
        for item in self.related:
            info = IPublishInfo(item)
            info.published = True
            info.date_last_published = DAY2
        dc = self.patches.add('zope.dublincore.interfaces.IDCTimes')
        dc().modified = DAY3

    def publish(self, content):
        # Publish synchronously so assertions can run right afterwards.
        # (async= is a reserved keyword from Python 3.7 on; this is Python 2.)
        IPublishInfo(content).urgent = True
        IPublish(content).publish(async=False)

    def test_should_not_publish_more_dependencies_than_the_limit_breadth(self):
        content = self.repository['testcontent']
        with checked_out(content) as co:
            IRelatedContent(co).related = tuple(self.related)
        BEFORE_PUBLISH = datetime.now(pytz.UTC)
        self.publish(content)
        # Only 2 of the 3 related items may have been published along.
        self.assertEqual(
            2, len([x for x in self.related
                    if IPublishInfo(x).date_last_published > BEFORE_PUBLISH]))

    def test_should_not_publish_more_dependencies_than_the_limit_depth(self):
        # Chain testcontent -> t0 -> t1 -> t2, each depending on the next.
        content = [self.repository['testcontent']] + self.related
        for i in range(3):
            with checked_out(content[i]) as co:
                IRelatedContent(co).related = tuple([content[i + 1]])
        BEFORE_PUBLISH = datetime.now(pytz.UTC)
        self.publish(content[0])
        self.assertEqual(
            2, len([x for x in self.related
                    if IPublishInfo(x).date_last_published > BEFORE_PUBLISH]))
class SynchronousPublishTest(zeit.cms.testing.FunctionalTestCase):
    """Publish/retract synchronously (async=False), without a task queue."""

    layer = zeit.workflow.testing.LAYER

    def test_publish_and_retract_in_same_process(self):
        article = ICMSContent('http://xml.zeit.de/online/2007/01/Somalia')
        info = IPublishInfo(article)
        info.urgent = True
        publish = IPublish(article)
        self.assertFalse(info.published)
        # (async= is a reserved keyword from Python 3.7 on; this is Python 2.)
        publish.publish(async=False)
        self.assertTrue(info.published)
        publish.retract(async=False)
        self.assertFalse(info.published)
        # The object log records the urgent-flag change, publish and retract.
        logs = reversed(zeit.objectlog.interfaces.ILog(article).logs)
        self.assertEqual(
            ['${name}: ${new_value}', 'Published', 'Retracted'],
            [x.message for x in logs])

    def test_synchronous_multi_publishing_works_with_unique_ids(self):
        article = ICMSContent('http://xml.zeit.de/online/2007/01/Somalia')
        info = IPublishInfo(article)
        info.urgent = True
        IPublish(article).publish_multiple([article.uniqueId], async=False)
        self.assertTrue(info.published)
class PublishPriorityTest(zeit.cms.testing.FunctionalTestCase):
    """The task queue is selected via the IPublishPriority adapter."""

    layer = zeit.workflow.testing.LAYER

    def test_determines_priority_via_adapter(self):
        content = self.repository['testcontent']
        info = IPublishInfo(content)
        info.urgent = True
        self.assertFalse(info.published)
        # Patch both the priority adapter and the celery task dispatch so we
        # can observe which queue the publish is routed to.
        with mock.patch(
                'zeit.cms.workflow.interfaces.IPublishPriority') as priority,\
                mock.patch.object(zeit.workflow.publish.PUBLISH_TASK,
                                  'apply_async') as apply_async:
            priority.return_value = zeit.cms.workflow.interfaces.PRIORITY_LOW
            IPublish(content).publish()
        # Low-priority content must be routed to the low-priority queue.
        apply_async.assert_called_with(
            ([u'http://xml.zeit.de/testcontent'],),
            queuename='publish_lowprio')
def get_object_log(obj):
    """Return the list of log messages recorded for *obj*."""
    entries = zeit.objectlog.interfaces.ILog(obj).get_log()
    return [entry.message for entry in entries]
class PublishEndToEndTest(zeit.cms.testing.FunctionalTestCase):
    """Runs publish through the real celery layer and asserts on the log
    output produced by the worker."""
    layer = zeit.workflow.testing.CELERY_LAYER
    def setUp(self):
        super(PublishEndToEndTest, self).setUp()
        # Capture all INFO-level logging into a StringIO so the tests can
        # assert on the publish progress messages.
        self.log = StringIO()
        self.handler = logging.StreamHandler(self.log)
        logging.root.addHandler(self.handler)
        self.loggers = [None, 'zeit']
        self.oldlevels = {}
        for name in self.loggers:
            log = logging.getLogger(name)
            self.oldlevels[name] = log.level
            log.setLevel(logging.INFO)
    def tearDown(self):
        # Restore the logging configuration changed in setUp.
        logging.root.removeHandler(self.handler)
        for name in self.loggers:
            logging.getLogger(name).setLevel(self.oldlevels[name])
        super(PublishEndToEndTest, self).tearDown()
    def test_publish_via_celery_end_to_end(self):
        content = ICMSContent('http://xml.zeit.de/online/2007/01/Somalia')
        info = IPublishInfo(content)
        self.assertFalse(info.published)
        info.urgent = True
        publish = IPublish(content).publish()
        # Commit so the celery worker sees the change; .get() blocks until
        # the asynchronous job finished.
        transaction.commit()
        self.assertEqual('Published.', publish.get())
        transaction.begin()
        self.assertEllipsis("""\
Running job ...
Publishing http://xml.zeit.de/online/2007/01/Somalia
...
Done http://xml.zeit.de/online/2007/01/Somalia (...s)...""",
                            self.log.getvalue())
        self.assertIn('Published', get_object_log(content))
    def test_publish_multiple_via_celery_end_to_end(self):
        c1 = ICMSContent('http://xml.zeit.de/online/2007/01/Flugsicherheit')
        c2 = ICMSContent('http://xml.zeit.de/online/2007/01/Saarland')
        i1 = IPublishInfo(c1)
        i2 = IPublishInfo(c2)
        self.assertFalse(i1.published)
        self.assertFalse(i2.published)
        i1.urgent = True
        i2.urgent = True
        publish = IPublish(c1).publish_multiple([c1, c2])
        transaction.commit()
        self.assertEqual('Published.', publish.get())
        transaction.begin()
        self.assertEllipsis("""\
Running job ...
 for http://xml.zeit.de/online/2007/01/Flugsicherheit,
 http://xml.zeit.de/online/2007/01/Saarland
Publishing http://xml.zeit.de/online/2007/01/Flugsicherheit,
 http://xml.zeit.de/online/2007/01/Saarland
...
Done http://xml.zeit.de/online/2007/01/Flugsicherheit,
 http://xml.zeit.de/online/2007/01/Saarland (...s)""",
            self.log.getvalue())
        self.assertIn('Published', get_object_log(c1))
        self.assertIn('Published', get_object_log(c2))
class PublishErrorEndToEndTest(zeit.cms.testing.FunctionalTestCase):
    """Replaces the publish script with one that always fails (exit 1) to
    verify error propagation into the object log."""
    layer = zeit.workflow.testing.CELERY_LAYER
    def setUp(self):
        super(PublishErrorEndToEndTest, self).setUp()
        # Swap the real publish script for a stub that exits non-zero.
        self.bak_path = self.layer['publish-script'] + '.bak'
        shutil.move(self.layer['publish-script'], self.bak_path)
        with open(self.layer['publish-script'], 'w') as f:
            f.write('#!/bin/sh\nexit 1')
        os.chmod(self.layer['publish-script'], 0o755)
    def tearDown(self):
        # Restore the original publish script.
        shutil.move(self.bak_path, self.layer['publish-script'])
        super(PublishErrorEndToEndTest, self).tearDown()
    def test_error_during_publish_is_written_to_objectlog(self):
        content = ICMSContent('http://xml.zeit.de/online/2007/01/Somalia')
        info = IPublishInfo(content)
        self.assertFalse(info.published)
        info.urgent = True
        publish = IPublish(content).publish()
        transaction.commit()
        with self.assertRaises(Exception) as err:
            publish.get()
        transaction.begin()
        self.assertEqual("Error during publish/retract: ScriptError: ('', 1)",
                         str(err.exception))
        # Log messages are i18n Message objects; interpolate the mapping
        # before comparing plain strings.
        self.assertIn(
            "Error during publish/retract: ScriptError: ('', 1)",
            [zope.i18n.interpolate(m, m.mapping)
             for m in get_object_log(content)])
    def test_error_during_publish_multiple_is_written_to_objectlog(self):
        c1 = ICMSContent('http://xml.zeit.de/online/2007/01/Flugsicherheit')
        c2 = ICMSContent('http://xml.zeit.de/online/2007/01/Saarland')
        i1 = IPublishInfo(c1)
        i2 = IPublishInfo(c2)
        self.assertFalse(i1.published)
        self.assertFalse(i2.published)
        i1.urgent = True
        i2.urgent = True
        publish = IPublish(c1).publish_multiple([c1, c2])
        transaction.commit()
        with self.assertRaises(Exception) as err:
            publish.get()
        transaction.begin()
        self.assertEqual("Error during publish/retract: ScriptError: ('', 1)",
                         str(err.exception))
        # Both objects of the collective publication get the error logged.
        self.assertIn(
            "Error during publish/retract: ScriptError: ('', 1)",
            [zope.i18n.interpolate(m, m.mapping) for m in get_object_log(c1)])
        self.assertIn(
            "Error during publish/retract: ScriptError: ('', 1)",
            [zope.i18n.interpolate(m, m.mapping) for m in get_object_log(c2)])
class MultiPublishRetractTest(zeit.cms.testing.FunctionalTestCase):
    """Behaviour of publish_multiple/retract_multiple: batching into one
    script call, uniqueId handling, empty input and per-item errors."""
    layer = zeit.workflow.testing.LAYER
    def test_publishes_and_retracts_multiple_objects_in_single_script_call(
            self):
        c1 = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/online/2007/01/Somalia')
        c2 = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/online/2007/01/eta-zapatero')
        IPublishInfo(c1).urgent = True
        IPublishInfo(c2).urgent = True
        # Both objects must be passed to a single publish script invocation.
        with mock.patch(
                'zeit.workflow.publish.PublishTask'
                '.call_publish_script') as script:
            IPublish(self.repository).publish_multiple([c1, c2], async=False)
            script.assert_called_with(['work/online/2007/01/Somalia',
                                       'work/online/2007/01/eta-zapatero'])
        self.assertTrue(IPublishInfo(c1).published)
        self.assertTrue(IPublishInfo(c2).published)
        # Same batching requirement for retract.
        with mock.patch(
                'zeit.workflow.publish.RetractTask'
                '.call_retract_script') as script:
            IPublish(self.repository).retract_multiple([c1, c2], async=False)
            script.assert_called_with(['work/online/2007/01/Somalia',
                                       'work/online/2007/01/eta-zapatero'])
        self.assertFalse(IPublishInfo(c1).published)
        self.assertFalse(IPublishInfo(c2).published)
    def test_accepts_uniqueId_as_well_as_ICMSContent(self):
        with mock.patch('zeit.workflow.publish.MultiPublishTask.run') as run:
            IPublish(self.repository).publish_multiple([
                self.repository['testcontent'],
                'http://xml.zeit.de/online/2007/01/Somalia'], async=False)
        ids = run.call_args[0][0]
        self.assertEqual([
            'http://xml.zeit.de/testcontent',
            'http://xml.zeit.de/online/2007/01/Somalia'], ids)
    def test_empty_list_of_objects_does_not_run_publish(self):
        with mock.patch(
                'zeit.workflow.publish.PublishTask'
                '.call_publish_script') as script:
            IPublish(self.repository).publish_multiple([], async=False)
        self.assertFalse(script.called)
    def test_error_in_one_item_continues_with_other_items(self):
        c1 = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/online/2007/01/Somalia')
        c2 = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/online/2007/01/eta-zapatero')
        IPublishInfo(c1).urgent = True
        IPublishInfo(c2).urgent = True
        calls = []
        # Event handler that fails for c1 only, to simulate a partial error.
        def after_publish(context, event):
            calls.append(context.uniqueId)
            if context.uniqueId == c1.uniqueId:
                raise RuntimeError('provoked')
        self.zca.patch_handler(
            after_publish,
            (zeit.cms.interfaces.ICMSContent,
             zeit.cms.workflow.interfaces.IPublishedEvent))
        with self.assertRaises(RuntimeError):
            IPublish(self.repository).publish_multiple([c1, c2], async=False)
        # PublishedEvent still happens for c2, even though c1 raised
        self.assertIn(c2.uniqueId, calls)
        # Error is logged
        log = zeit.objectlog.interfaces.ILog(c1)
        self.assertEqual(
            [u'${name}: ${new_value}',
             u'Collective Publication',
             u'Error during publish/retract: ${exc}: ${message}'],
            [x.message for x in log.get_log()])
| |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas.errors import (
InvalidIndexError,
PerformanceWarning,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
    """Assorted regression and behaviour tests for :func:`pandas.concat`:
    copy semantics, key/level handling, mixed Series/DataFrame input,
    iterables, dtype preservation and error cases."""
    def test_append_concat(self):
        # GH#1815
        d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
        d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
        s1 = Series(np.random.randn(10), d1)
        s2 = Series(np.random.randn(10), d2)
        s1 = s1.to_period()
        s2 = s2.to_period()
        # drops index
        result = concat([s1, s2])
        assert isinstance(result.index, PeriodIndex)
        assert result.index[0] == s1.index[0]
    def test_concat_copy(self, using_array_manager):
        df = DataFrame(np.random.randn(4, 3))
        df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
        df3 = DataFrame({5: "foo"}, index=range(4))
        # These are actual copies.
        result = concat([df, df2, df3], axis=1, copy=True)
        for arr in result._mgr.arrays:
            assert arr.base is None
        # These are the same.
        result = concat([df, df2, df3], axis=1, copy=False)
        for arr in result._mgr.arrays:
            if arr.dtype.kind == "f":
                assert arr.base is df._mgr.arrays[0].base
            elif arr.dtype.kind in ["i", "u"]:
                assert arr.base is df2._mgr.arrays[0].base
            elif arr.dtype == object:
                if using_array_manager:
                    # we get the same array object, which has no base
                    assert arr is df3._mgr.arrays[0]
                else:
                    assert arr.base is not None
        # Float block was consolidated.
        df4 = DataFrame(np.random.randn(4, 1))
        result = concat([df, df2, df3, df4], axis=1, copy=False)
        for arr in result._mgr.arrays:
            if arr.dtype.kind == "f":
                if using_array_manager:
                    # this is a view on some array in either df or df4
                    assert any(
                        np.shares_memory(arr, other)
                        for other in df._mgr.arrays + df4._mgr.arrays
                    )
                else:
                    # the block was consolidated, so we got a copy anyway
                    assert arr.base is None
            elif arr.dtype.kind in ["i", "u"]:
                assert arr.base is df2._mgr.arrays[0].base
            elif arr.dtype == object:
                # this is a view on df3
                assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
    def test_concat_with_group_keys(self):
        # keys= adds an outer MultiIndex level along the concat axis.
        # axis=0
        df = DataFrame(np.random.randn(3, 4))
        df2 = DataFrame(np.random.randn(4, 4))
        result = concat([df, df2], keys=[0, 1])
        exp_index = MultiIndex.from_arrays(
            [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
        )
        expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
        tm.assert_frame_equal(result, expected)
        result = concat([df, df], keys=[0, 1])
        exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
        expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
        tm.assert_frame_equal(result, expected)
        # axis=1
        df = DataFrame(np.random.randn(4, 3))
        df2 = DataFrame(np.random.randn(4, 4))
        result = concat([df, df2], keys=[0, 1], axis=1)
        expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
        tm.assert_frame_equal(result, expected)
        result = concat([df, df], keys=[0, 1], axis=1)
        expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
        tm.assert_frame_equal(result, expected)
    def test_concat_keys_specific_levels(self):
        # explicitly passed levels= define the level values and their order.
        df = DataFrame(np.random.randn(10, 4))
        pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
        level = ["three", "two", "one", "zero"]
        result = concat(
            pieces,
            axis=1,
            keys=["one", "two", "three"],
            levels=[level],
            names=["group_key"],
        )
        tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
        tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
        assert result.columns.names == ["group_key", None]
    @pytest.mark.parametrize("mapping", ["mapping", "dict"])
    def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
        # dict and non-dict Mapping inputs behave like keys=sorted(keys).
        constructor = dict if mapping == "dict" else non_dict_mapping_subclass
        frames = constructor(
            {
                "foo": DataFrame(np.random.randn(4, 3)),
                "bar": DataFrame(np.random.randn(4, 3)),
                "baz": DataFrame(np.random.randn(4, 3)),
                "qux": DataFrame(np.random.randn(4, 3)),
            }
        )
        sorted_keys = list(frames.keys())
        result = concat(frames)
        expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
        tm.assert_frame_equal(result, expected)
        result = concat(frames, axis=1)
        expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
        tm.assert_frame_equal(result, expected)
        keys = ["baz", "foo", "bar"]
        result = concat(frames, keys=keys)
        expected = concat([frames[k] for k in keys], keys=keys)
        tm.assert_frame_equal(result, expected)
    def test_concat_keys_and_levels(self):
        df = DataFrame(np.random.randn(1, 3))
        df2 = DataFrame(np.random.randn(1, 4))
        levels = [["foo", "baz"], ["one", "two"]]
        names = ["first", "second"]
        result = concat(
            [df, df2, df, df2],
            keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
            levels=levels,
            names=names,
        )
        expected = concat([df, df2, df, df2])
        exp_index = MultiIndex(
            levels=levels + [[0]],
            codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
            names=names + [None],
        )
        expected.index = exp_index
        tm.assert_frame_equal(result, expected)
        # no names
        result = concat(
            [df, df2, df, df2],
            keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
            levels=levels,
        )
        assert result.index.names == (None,) * 3
        # no levels
        result = concat(
            [df, df2, df, df2],
            keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
            names=["first", "second"],
        )
        assert result.index.names == ("first", "second", None)
        tm.assert_index_equal(
            result.index.levels[0], Index(["baz", "foo"], name="first")
        )
    def test_concat_keys_levels_no_overlap(self):
        # GH #1406
        df = DataFrame(np.random.randn(1, 3), index=["a"])
        df2 = DataFrame(np.random.randn(1, 4), index=["b"])
        msg = "Values not found in passed level"
        with pytest.raises(ValueError, match=msg):
            concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
        msg = "Key one not in level"
        with pytest.raises(ValueError, match=msg):
            concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
    def test_crossed_dtypes_weird_corner(self):
        # per-column dtypes differ between the frames being stacked.
        columns = ["A", "B", "C", "D"]
        df1 = DataFrame(
            {
                "A": np.array([1, 2, 3, 4], dtype="f8"),
                "B": np.array([1, 2, 3, 4], dtype="i8"),
                "C": np.array([1, 2, 3, 4], dtype="f8"),
                "D": np.array([1, 2, 3, 4], dtype="i8"),
            },
            columns=columns,
        )
        df2 = DataFrame(
            {
                "A": np.array([1, 2, 3, 4], dtype="i8"),
                "B": np.array([1, 2, 3, 4], dtype="f8"),
                "C": np.array([1, 2, 3, 4], dtype="i8"),
                "D": np.array([1, 2, 3, 4], dtype="f8"),
            },
            columns=columns,
        )
        appended = concat([df1, df2], ignore_index=True)
        expected = DataFrame(
            np.concatenate([df1.values, df2.values], axis=0), columns=columns
        )
        tm.assert_frame_equal(appended, expected)
        df = DataFrame(np.random.randn(1, 3), index=["a"])
        df2 = DataFrame(np.random.randn(1, 4), index=["b"])
        result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
        assert result.index.names == ("first", "second")
    def test_with_mixed_tuples(self, sort):
        # 10697
        # columns have mixed tuples, so handle properly
        df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
        df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
        # it works
        concat([df1, df2], sort=sort)
    def test_concat_mixed_objs(self):
        # concat mixed series/frames
        # G2385
        # axis 1
        index = date_range("01-Jan-2013", periods=10, freq="H")
        arr = np.arange(10, dtype="int64")
        s1 = Series(arr, index=index)
        s2 = Series(arr, index=index)
        df = DataFrame(arr.reshape(-1, 1), index=index)
        expected = DataFrame(
            np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
        )
        result = concat([df, df], axis=1)
        tm.assert_frame_equal(result, expected)
        expected = DataFrame(
            np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
        )
        result = concat([s1, s2], axis=1)
        tm.assert_frame_equal(result, expected)
        expected = DataFrame(
            np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
        )
        result = concat([s1, s2, s1], axis=1)
        tm.assert_frame_equal(result, expected)
        expected = DataFrame(
            np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
        )
        result = concat([s1, df, s2, s2, s1], axis=1)
        tm.assert_frame_equal(result, expected)
        # with names
        s1.name = "foo"
        expected = DataFrame(
            np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
        )
        result = concat([s1, df, s2], axis=1)
        tm.assert_frame_equal(result, expected)
        s2.name = "bar"
        expected = DataFrame(
            np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
        )
        result = concat([s1, df, s2], axis=1)
        tm.assert_frame_equal(result, expected)
        # ignore index
        expected = DataFrame(
            np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
        )
        result = concat([s1, df, s2], axis=1, ignore_index=True)
        tm.assert_frame_equal(result, expected)
        # axis 0
        expected = DataFrame(
            np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
        )
        result = concat([s1, df, s2])
        tm.assert_frame_equal(result, expected)
        expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
        result = concat([s1, df, s2], ignore_index=True)
        tm.assert_frame_equal(result, expected)
    def test_dtype_coerceion(self):
        # 12411
        df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
        result = concat([df.iloc[[0]], df.iloc[[1]]])
        tm.assert_series_equal(result.dtypes, df.dtypes)
        # 12045
        import datetime
        # NOTE: year 1012 is deliberately outside the datetime64[ns] bounds,
        # forcing object dtype.
        df = DataFrame(
            {"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
        )
        result = concat([df.iloc[[0]], df.iloc[[1]]])
        tm.assert_series_equal(result.dtypes, df.dtypes)
        # 11594
        df = DataFrame({"text": ["some words"] + [None] * 9})
        result = concat([df.iloc[[0]], df.iloc[[1]]])
        tm.assert_series_equal(result.dtypes, df.dtypes)
    def test_concat_single_with_key(self):
        df = DataFrame(np.random.randn(10, 4))
        result = concat([df], keys=["foo"])
        expected = concat([df, df], keys=["foo", "bar"])
        tm.assert_frame_equal(result, expected[:10])
    def test_concat_no_items_raises(self):
        with pytest.raises(ValueError, match="No objects to concatenate"):
            concat([])
    def test_concat_exclude_none(self):
        # None entries are silently dropped, but all-None input raises.
        df = DataFrame(np.random.randn(10, 4))
        pieces = [df[:5], None, None, df[5:]]
        result = concat(pieces)
        tm.assert_frame_equal(result, df)
        with pytest.raises(ValueError, match="All objects passed were None"):
            concat([None, None])
    def test_concat_keys_with_none(self):
        # #1649
        df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
        result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
        expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
        tm.assert_frame_equal(result, expected)
        result = concat(
            [None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
        )
        expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
        tm.assert_frame_equal(result, expected)
    def test_concat_bug_1719(self):
        ts1 = tm.makeTimeSeries()
        ts2 = tm.makeTimeSeries()[::2]
        # to join with union
        # these two are of different length!
        left = concat([ts1, ts2], join="outer", axis=1)
        right = concat([ts2, ts1], join="outer", axis=1)
        assert len(left) == len(right)
    def test_concat_bug_2972(self):
        ts0 = Series(np.zeros(5))
        ts1 = Series(np.ones(5))
        ts0.name = ts1.name = "same name"
        result = concat([ts0, ts1], axis=1)
        expected = DataFrame({0: ts0, 1: ts1})
        expected.columns = ["same name", "same name"]
        tm.assert_frame_equal(result, expected)
    def test_concat_bug_3602(self):
        # GH 3602, duplicate columns
        df1 = DataFrame(
            {
                "firmNo": [0, 0, 0, 0],
                "prc": [6, 6, 6, 6],
                "stringvar": ["rrr", "rrr", "rrr", "rrr"],
            }
        )
        df2 = DataFrame(
            {"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
        )
        expected = DataFrame(
            [
                [0, 6, "rrr", 9, 1, 6],
                [0, 6, "rrr", 10, 2, 6],
                [0, 6, "rrr", 11, 3, 6],
                [0, 6, "rrr", 12, 4, 6],
            ]
        )
        expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
        result = concat([df1, df2], axis=1)
        tm.assert_frame_equal(result, expected)
    def test_concat_iterables(self):
        # GH8645 check concat works with tuples, list, generators, and weird
        # stuff like deque and custom iterables
        df1 = DataFrame([1, 2, 3])
        df2 = DataFrame([4, 5, 6])
        expected = DataFrame([1, 2, 3, 4, 5, 6])
        tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
        tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
        tm.assert_frame_equal(
            concat((df for df in (df1, df2)), ignore_index=True), expected
        )
        tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
        # sequence protocol only (__len__ + __getitem__), no __iter__
        class CustomIterator1:
            def __len__(self) -> int:
                return 2
            def __getitem__(self, index):
                try:
                    return {0: df1, 1: df2}[index]
                except KeyError as err:
                    raise IndexError from err
        tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected)
        # iterator protocol only
        class CustomIterator2(abc.Iterable):
            def __iter__(self):
                yield df1
                yield df2
        tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected)
    def test_concat_order(self):
        # GH 17344
        dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
        dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
        result = concat(dfs, sort=True).columns
        expected = dfs[0].columns
        tm.assert_index_equal(result, expected)
    def test_concat_different_extension_dtypes_upcasts(self):
        a = Series(pd.array([1, 2], dtype="Int64"))
        b = Series(to_decimal([1, 2]))
        result = concat([a, b], ignore_index=True)
        expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
        tm.assert_series_equal(result, expected)
    def test_concat_ordered_dict(self):
        # GH 21510
        expected = concat(
            [Series(range(3)), Series(range(4))], keys=["First", "Another"]
        )
        result = concat({"First": Series(range(3)), "Another": Series(range(4))})
        tm.assert_series_equal(result, expected)
    def test_concat_duplicate_indices_raise(self):
        # GH 45888: test raise for concat DataFrames with duplicate indices
        # https://github.com/pandas-dev/pandas/issues/36263
        df1 = DataFrame(np.random.randn(5), index=[0, 1, 2, 3, 3], columns=["a"])
        df2 = DataFrame(np.random.randn(5), index=[0, 1, 2, 2, 4], columns=["b"])
        msg = "Reindexing only valid with uniquely valued Index objects"
        with pytest.raises(InvalidIndexError, match=msg):
            concat([df1, df2], axis=1)
@pytest.mark.parametrize("pdt", [Series, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
    # GH 13247
    # All-float inputs (including NaN) of the same dtype must keep that
    # dtype instead of being upcast to float64.
    dims = pdt(dtype=object).ndim
    dfs = [
        pdt(np.array([1], dtype=dt, ndmin=dims)),
        pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
        pdt(np.array([5], dtype=dt, ndmin=dims)),
    ]
    x = concat(dfs)
    assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
    # Mixing an integer dtype with NaN forces an upcast to float64.
    with catch_warnings(record=True):
        dims = pdt().ndim
        dfs = [
            pdt(np.array([1], dtype=dt, ndmin=dims)),
            pdt(np.array([np.nan], ndmin=dims)),
            pdt(np.array([5], dtype=dt, ndmin=dims)),
        ]
        x = concat(dfs)
        assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
    # GH 18178 regression test
    # NOTE(review): relies on the empty frame's (float) column dtype being
    # ignored when determining the result dtype — version-sensitive.
    df1 = DataFrame({"foo": [1]})
    df2 = DataFrame({"foo": []})
    expected = DataFrame({"foo": [1.0]})
    result = concat([df1, df2])
    tm.assert_frame_equal(result, expected)
def test_concat_sparse():
    # GH 23557: concatenating sparse series must yield a sparse frame
    # with the integer fill value preserved.
    ser = Series(SparseArray([0, 1, 2]))
    result = concat([ser, ser], axis=1)
    expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
        pd.SparseDtype(np.int64, 0)
    )
    tm.assert_frame_equal(result, expected)
def test_concat_dense_sparse():
    # GH 30668: appending a dense series to a sparse one keeps the
    # sparse dtype on the result.
    dtype = pd.SparseDtype(np.float64, None)
    sparse = Series(pd.arrays.SparseArray([1, None]), dtype=dtype)
    dense = Series([1], dtype=float)
    expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(dtype)
    result = concat([sparse, dense], axis=0)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
def test_duplicate_keys(keys):
    # GH 33654
    # Duplicate keys along axis=1 are allowed and become repeated entries
    # in the outer MultiIndex level.
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    s1 = Series([7, 8, 9], name="c")
    s2 = Series([10, 11, 12], name="d")
    result = concat([df, s1, s2], axis=1, keys=keys)
    expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
    expected_columns = MultiIndex.from_tuples(
        [(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
    )
    expected = DataFrame(expected_values, columns=expected_columns)
    tm.assert_frame_equal(result, expected)
def test_duplicate_keys_same_frame():
    # GH 43595: the same frame concatenated with itself under duplicate
    # keys produces a MultiIndex with a repeated top-level entry.
    keys = ["e", "e"]
    frame = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    result = concat([frame, frame], axis=1, keys=keys)
    expected = DataFrame(
        [[1, 4, 1, 4], [2, 5, 2, 5], [3, 6, 3, 6]],
        columns=MultiIndex.from_tuples(
            [(keys[0], "a"), (keys[0], "b"), (keys[1], "a"), (keys[1], "b")]
        ),
    )
    with catch_warnings():
        # result.columns not sorted, resulting in performance warning
        simplefilter("ignore", PerformanceWarning)
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "obj",
    [
        tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
        tm.SubclassedSeries(np.arange(0, 10), name="A"),
    ],
)
def test_concat_preserves_subclass(obj):
    # GH28330 -- preserve subclass
    result = concat([obj, obj])
    assert isinstance(result, type(obj))
def test_concat_frame_axis0_extension_dtypes():
    # preserve extension dtype (through common_dtype mechanism): mixing a
    # masked Int64 column with a plain numpy int column yields Int64,
    # regardless of the order of the inputs.
    masked = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")})
    plain = DataFrame({"a": np.array([4, 5, 6])})
    tm.assert_frame_equal(
        concat([masked, plain], ignore_index=True),
        DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64"),
    )
    tm.assert_frame_equal(
        concat([plain, masked], ignore_index=True),
        DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64"),
    )
def test_concat_preserves_extension_int64_dtype():
    # GH 24768: disjoint columns get filled with <NA>, and the result
    # keeps the Int64 extension dtype rather than falling back to object.
    left = DataFrame({"a": [-1]}, dtype="Int64")
    right = DataFrame({"b": [1]}, dtype="Int64")
    expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
    result = concat([left, right], ignore_index=True)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "dtype1,dtype2,expected_dtype",
    [
        ("bool", "bool", "bool"),
        ("boolean", "bool", "boolean"),
        ("bool", "boolean", "boolean"),
        ("boolean", "boolean", "boolean"),
    ],
)
def test_concat_bool_types(dtype1, dtype2, expected_dtype):
    # GH 42800
    # The nullable "boolean" dtype wins over plain numpy "bool".
    ser1 = Series([True, False], dtype=dtype1)
    ser2 = Series([False, True], dtype=dtype2)
    result = concat([ser1, ser2], ignore_index=True)
    expected = Series([True, False, False, True], dtype=expected_dtype)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ("keys", "integrity"),
    [
        (["red"] * 3, True),
        (["red"] * 3, False),
        (["red", "blue", "red"], False),
        (["red", "blue", "red"], True),
    ],
)
def test_concat_repeated_keys(keys, integrity):
    # GH: 20816
    # Repeated keys are fine even with verify_integrity=True, because the
    # (key, label) tuples remain unique.
    series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})]
    result = concat(series_list, keys=keys, verify_integrity=integrity)
    tuples = list(zip(keys, ["a", "b", "c"]))
    expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
    tm.assert_series_equal(result, expected)
def test_concat_null_object_with_dti():
    # GH#40841
    # Joining an object index containing None with a DatetimeIndex must
    # produce an object-dtype index, not raise or coerce.
    dti = pd.DatetimeIndex(
        ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
    )
    right = DataFrame(data={"C": [0.5274]}, index=dti)
    idx = Index([None], dtype="object", name="Maybe Time (UTC)")
    left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx)
    result = concat([left, right], axis="columns")
    exp_index = Index([None, dti[0]], dtype=object)
    expected = DataFrame(
        {"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]},
        index=exp_index,
    )
    tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_empty_rangeindex():
    # GH#41234: a frame without any columns contributes an all-NaN row
    # but must not disturb the MultiIndex columns of the other frame.
    columns = MultiIndex.from_tuples([("B", 1), ("C", 1)])
    with_cols = DataFrame([[1, 2]], columns=columns)
    no_cols = DataFrame(index=[1], columns=pd.RangeIndex(0))
    expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=columns)
    result = concat([with_cols, no_cols])
    tm.assert_frame_equal(result, expected)
def test_concat_posargs_deprecation():
    # https://github.com/pandas-dev/pandas/issues/41485
    # Passing axis positionally warns; keyword-only enforcement is planned.
    df = DataFrame([[1, 2, 3]], index=["a"])
    df2 = DataFrame([[4, 5, 6]], index=["b"])
    msg = (
        "In a future version of pandas all arguments of concat "
        "except for the argument 'objs' will be keyword-only"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = concat([df, df2], 0)
    expected = DataFrame([[1, 2, 3], [4, 5, 6]], index=["a", "b"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "data",
    [
        Series(data=[1, 2]),
        DataFrame(
            data={
                "col1": [1, 2],
            }
        ),
        DataFrame(dtype=float),
        Series(dtype=float),
    ],
)
def test_concat_drop_attrs(data):
    # GH#41828
    # When the inputs carry *different* attrs, the result drops them.
    df1 = data.copy()
    df1.attrs = {1: 1}
    df2 = data.copy()
    df2.attrs = {1: 2}
    df = concat([df1, df2])
    assert len(df.attrs) == 0
@pytest.mark.parametrize(
    "data",
    [
        Series(data=[1, 2]),
        DataFrame(
            data={
                "col1": [1, 2],
            }
        ),
        DataFrame(dtype=float),
        Series(dtype=float),
    ],
)
def test_concat_retain_attrs(data):
    # GH#41828
    # When all inputs carry *identical* attrs, the result keeps them.
    df1 = data.copy()
    df1.attrs = {1: 1}
    df2 = data.copy()
    df2.attrs = {1: 1}
    df = concat([df1, df2])
    assert df.attrs[1] == 1
| |
import numpy
from amuse.units import constants, units
from amuse.support.literature import LiteratureReferencesMixIn
# scipy is optional: only the profiles using incomplete gamma functions
# (e.g. PowerLawCutoff_profile) need it.  Record availability so those
# code paths can fail with a clear message instead of a NameError.
try:
    from scipy.special import gammainc, gamma
    scipy_imported = True
except ImportError:
    # Narrowed from a bare ``except:``, which would also have swallowed
    # SystemExit and KeyboardInterrupt.
    scipy_imported = False
class NFW_profile(LiteratureReferencesMixIn):
    """
    Gravitational potential of the NFW (1996) halo
    Two-power density spherical model suitable for modeling dark matter halos.
    Density--potential pair:
    * density(r) = rho0 / [r/rs * (1+r/rs)^2], where r is the spherical radius
    * potential(r) = -4*pi*G*rho0*rs^2 * ln(1+r/rs)/(r/rs)
    .. [#] Navarro, Julio F.; Frenk, Carlos S.; White, Simon D. M., The Astrophysical Journal, Volume 490, Issue 2, pp. 493-508 (1996)
    :argument rho0: density parameter
    :argument rs: scale radius
    """
    def __init__(self,rho0,rs,G=constants.G):
        LiteratureReferencesMixIn.__init__(self)
        self.rho0 = rho0
        self.rs = rs
        self.G = G
        # precompute constant factors used by the force/potential formulas
        self.four_pi_rho0 = 4.*numpy.pi*self.rho0
        self.four_pi_rho0_G = self.four_pi_rho0*self.G
    def radial_force(self,r):
        """Radial force per unit mass, F(r) = -G M(<r) / r^2 (negative, i.e.
        pointing inward), with M(<r) = 4 pi rho0 rs^3 [ln(1+x) - x/(1+x)],
        x = r/rs.  The expression below is an algebraically equivalent form:
        note 1/(r*rs+r**2) == (x/(1+x))/r^2 / rs^3 * rs^3."""
        r_rs = r/self.rs
        ar = self.four_pi_rho0_G*self.rs**3*(1./(r*self.rs+r**2)-(1./r**2)*numpy.log(1.+r_rs))
        #ar = self.four_pi_rho0_G*self.rs*((r_rs-(1.+r_rs)*numpy.log(1.+r_rs))/r_rs**2/(1.+r_rs))
        return ar
    def get_potential_at_point(self,eps,x,y,z):
        """Potential at (x, y, z); eps (softening) is accepted but unused."""
        r = (x**2+y**2+z**2).sqrt()
        r_rs = r/self.rs
        return -1.*self.four_pi_rho0_G*self.rs**2*numpy.log(1.+r_rs)/r_rs
    def get_gravity_at_point(self,eps,x,y,z):
        """Acceleration components at (x, y, z), projecting the radial
        force onto the Cartesian axes; eps is accepted but unused."""
        r = (x**2+y**2+z**2).sqrt()
        fr = self.radial_force(r)
        ax = fr*x/r
        ay = fr*y/r
        az = fr*z/r
        return ax,ay,az
    def enclosed_mass(self,r):
        """Mass enclosed within radius r, from F = -G M(<r)/r^2."""
        fr = self.radial_force(r)
        return -r**2/self.G*fr
    def circular_velocity(self,r):
        """Circular orbital velocity at radius r, v_c = sqrt(-r F(r))."""
        fr = self.radial_force(r)
        return (-r*fr).sqrt()
    def mass_density(self,r):
        """Mass density at radius r (the NFW density law)."""
        r_rs = r/self.rs
        return self.rho0 / (r_rs*(1.+r_rs)**2)
class MiyamotoNagai_profile(LiteratureReferencesMixIn):
    """
    Miyamoto and Nagai (1975) axisymmetric disk.
    * potential(R,z) = -GM / sqrt(R**2 + (a+sqrt(z**2+b**2))**2)
    .. [#] Miyamoto, M.; Nagai, R., Astronomical Society of Japan, Publications, vol. 27, no. 4, 1975, p. 533-543 (1975)
    :argument mass: total mass
    :argument a: disk scale radius
    :argument b: disk scale height
    """
    def __init__(self,mass,a,b,G=constants.G):
        LiteratureReferencesMixIn.__init__(self)
        self.mass = mass
        self.a = a
        self.b = b
        self.G = G
        # cache frequently used combinations
        self.GM = self.G*self.mass
        self.a2 = self.a**2
        self.b2 = self.b**2
    def force_R(self,x,y,z):
        """Cylindrical-radial force component, -GM R / (R^2+(a+sqrt(z^2+b^2))^2)^(3/2)."""
        R2 = x**2+y**2
        R = R2.sqrt()
        sqrt_z2_b2 = (z**2+self.b2).sqrt()
        return -self.GM*R*(R2+(self.a+sqrt_z2_b2)**2)**(-1.5)
    def force_z(self,x,y,z):
        """Vertical force component (d potential / d z with sign flipped)."""
        R2 = x**2+y**2
        sqrt_z2_b2 = (z**2+self.b2).sqrt()
        a_sqrt_z2_b2 = self.a+sqrt_z2_b2
        return -self.GM*z*a_sqrt_z2_b2/((R2+a_sqrt_z2_b2**2)**1.5*sqrt_z2_b2)
    def get_potential_at_point(self,eps,x,y,z):
        """Potential at (x, y, z); eps (softening) is accepted but unused."""
        R2 = x**2+y**2
        return -self.GM/(R2+(self.a+(self.b2+z**2).sqrt())**2).sqrt()
    def get_gravity_at_point(self,eps,x,y,z):
        """Acceleration components at (x, y, z): the cylindrical-radial
        force projected on x and y, plus the vertical component."""
        fR = self.force_R(x,y,z)
        R = (x**2+y**2).sqrt()
        ax = fR*x/R
        ay = fR*y/R
        az = self.force_z(x,y,z)
        return ax,ay,az
    def mass_density(self,x,y,z):
        """Mass density at (x, y, z) (Miyamoto & Nagai 1975, eq. for rho)."""
        R2 = x**2+y**2
        z2_b2 = z**2+self.b2
        sqrt_z2_b2 = z2_b2.sqrt()
        rho = self.b2*self.mass/(4.*numpy.pi) * \
            (self.a*R2+(self.a+3.*sqrt_z2_b2)*(self.a+sqrt_z2_b2)**2) / \
            ((R2+(self.a+sqrt_z2_b2)**2)**2.5*z2_b2**1.5)
        return rho
    def circular_velocity_at_z0(self,R):
        """Circular velocity in the galactic midplane (z = 0)."""
        fR_at_z0 = self.force_R(R,0.|units.kpc,0.|units.kpc)
        return (-R*fR_at_z0).sqrt()
    def equivalent_enclosed_mass_in_plane(self,R):
        """
        mass, that would be enclosed in profile corresponding the disk profile in the
        galactic plane (z=0)
        """
        fR_at_z0 = self.force_R(R,0.|units.kpc,0.|units.kpc)
        return -R**2/self.G*fR_at_z0
class Plummer_profile(LiteratureReferencesMixIn):
    """
    Spherically symmetric Plummer (1911) profile

    * potential(r) = -GM / sqrt(a**2 + r**2)
    * density(r) = (3M/4pi*a**3) * (1+(r/a)**2)**(-5/2)

    .. [#] Plummer, H. C., MNRAS, Vol. 71, p.460-470 (1911)

    :argument mass: total mass
    :argument a: scale radius
    """
    def __init__(self,mass,a,G=constants.G):
        LiteratureReferencesMixIn.__init__(self)
        self.mass = mass
        self.a = a
        self.G = G
        # cached combinations used by the force/potential formulas
        self.GM = self.G*self.mass
        self.a2 = self.a**2

    def radial_force(self,r):
        """Radial force F(r) = -GM r (r**2 + a**2)**(-3/2)."""
        r2 = r**2
        return -self.GM*r*(r2+self.a2)**(-1.5)

    def get_gravity_at_point(self,eps,x,y,z):
        # eps (softening) is accepted for interface compatibility but unused
        r = (x**2+y**2+z**2).sqrt()
        fr = self.radial_force(r)
        ax = fr*x/r
        ay = fr*y/r
        az = fr*z/r
        return ax, ay, az

    def get_potential_at_point(self,eps,x,y,z):
        r2 = x**2+y**2+z**2
        return -self.GM/(r2+self.a2).sqrt()

    def mass_density(self,r):
        """Plummer volume density at radius r."""
        return self.mass/(4./3.*numpy.pi*self.a**3)*(1.+(r/self.a)**2)**(-2.5)

    def enclosed_mass(self,r):
        """Mass within r, from M(<r) = -r**2 F(r) / G."""
        fr = self.radial_force(r)
        return -r**2/self.G*fr

    def circular_velocity(self,r):
        """Circular orbit speed v_c = sqrt(-r F(r))."""
        fr = self.radial_force(r)
        return (-r*fr).sqrt()
class PowerLawCutoff_profile(LiteratureReferencesMixIn):
    """
    Spherically symmetric power-law density with exponential cut-off

    * density(r) = rho0*(r0/r)^alpha*exp(-(r/rc)^2)

    :argument rho0: density amplitude
    :argument r0: power-law scaling radius
    :argument alpha: power-law index, alpha<3
    :argument rc: cut-off radius
    """
    def __init__(self,rho0,r0,alpha,rc,G=constants.G):
        LiteratureReferencesMixIn.__init__(self)
        self.rho0 = rho0
        self.r0 = r0
        self.alpha = alpha
        self.rc = rc
        self.G = G
        # the amplitude rho0 * r0**alpha appears in every formula below
        self.rho0_r0_to_alpha = self.rho0*self.r0**self.alpha
        # alpha >= 3 makes the enclosed mass diverge toward the centre
        if 3.<=self.alpha: print("Warning: power-law index must be less than 3.")

    def get_potential_at_point(self,eps,x,y,z):
        if scipy_imported == False:
            # NOTE(review): this constructs the warning object but never raises
            # or emits it; the gamma/gammainc calls below will fail regardless.
            AmuseWarning("importing scipy failed, maybe not installed")
        r = (x**2+y**2+z**2).sqrt()
        r_rc = r/self.rc
        return -2.*numpy.pi*self.G*self.rho0_r0_to_alpha*self.rc**(3.-self.alpha)/r* \
            (r/self.rc*gamma(1.-self.alpha/2.)*gammainc(1.-self.alpha/2.,(r/self.rc)**2.)-gamma(1.5-self.alpha/2.)*gammainc(1.5-self.alpha/2.,(r/self.rc)**2.))

    def radial_force(self,r):
        """Radial force from the enclosed mass (spherical symmetry)."""
        Mr = self.enclosed_mass(r)
        return -self.G*Mr/r**2

    def get_gravity_at_point(self,eps,x,y,z):
        # eps (softening) is accepted for interface compatibility but unused
        r = (x**2+y**2+z**2).sqrt()
        fr = self.radial_force(r)
        ax = fr*x/r
        ay = fr*y/r
        az = fr*z/r
        return ax, ay, az

    def mass_density(self,r):
        """density(r) = rho0 * (r0/r)**alpha * exp(-(r/rc)**2)."""
        return self.rho0_r0_to_alpha*r**(-self.alpha)*numpy.exp(-(r/self.rc)**2.)

    def enclosed_mass(self,r):
        """
        careful with scipy.special.gammainc :
        gammainc(a,x) = 1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
        i.e. it is the *regularized* form, hence the explicit gamma() factor.
        """
        if scipy_imported == False:
            AmuseWarning("importing scipy failed, maybe not installed")
        return 2.*numpy.pi*self.rho0_r0_to_alpha*self.rc**(3.-self.alpha)* \
            gamma(1.5-0.5*self.alpha)*gammainc(1.5-0.5*self.alpha,(r/self.rc)**2)

    def circular_velocity(self,r):
        """Circular orbit speed v_c = sqrt(-r F(r))."""
        fr = self.radial_force(r)
        return (-r*fr).sqrt()
class MWpotentialBovy2015(LiteratureReferencesMixIn):
    """
    MW-like galaxy potential consisting of a bulge modeled as a power-law
    density profile that is exponentially cut off, a Miyamoto & Nagai disk,
    and an NFW dark-matter halo. Parameters of the individual components are
    based on fits to observational data. In addition to these constraints,
    the solar distance to the Galactic center is set to R0=8kpc and the
    circular velocity at the Sun to V0=220km/s.

    .. [#] Bovy, J; ApJSS, Volume 216, Issue 2, article id. 29, 27 pp. (2015)

    :argument dist_gal_center: x-offset applied so that x=0 lies at this
        distance from the Galactic center
    """
    def __init__(self, dist_gal_center = 9.0 | units.kpc):
        LiteratureReferencesMixIn.__init__(self)
        # component parameters from the Bovy (2015) best fit
        self.bulge = PowerLawCutoff_profile(2.22638e8|units.MSun/units.kpc**3, 1.|units.kpc, 1.8, 1.9|units.kpc)
        self.disk = MiyamotoNagai_profile(6.81766163214e10|units.MSun, 3.|units.kpc, 0.28|units.kpc)
        self.halo = NFW_profile(8484685.92946|units.MSun/units.kpc**3, 16.|units.kpc)
        self.radius = dist_gal_center

    def get_potential_at_point(self,eps,x,y,z):
        """Total potential: bulge + disk + halo, with x shifted by self.radius."""
        # rebind instead of '+=' so a caller-supplied array is not mutated
        x = x + self.radius
        return self.bulge.get_potential_at_point(eps,x,y,z) + \
            self.disk.get_potential_at_point(eps,x,y,z) + \
            self.halo.get_potential_at_point(eps,x,y,z)

    def get_gravity_at_point(self,eps,x,y,z):
        """Total acceleration: component-wise sum of bulge, disk and halo.

        BUG FIX: the offset statement was truncated to 'x += self.' in the
        source (a SyntaxError); it now applies the same self.radius shift
        used by get_potential_at_point, without mutating the caller's x.
        """
        x = x + self.radius
        ax_b,ay_b,az_b = self.bulge.get_gravity_at_point(eps,x,y,z)
        ax_d,ay_d,az_d = self.disk.get_gravity_at_point(eps,x,y,z)
        ax_h,ay_h,az_h = self.halo.get_gravity_at_point(eps,x,y,z)
        return ax_b+ax_d+ax_h, ay_b+ay_d+ay_h, az_b+az_d+az_h

    def mass_density(self,x,y,z):
        """Total density; bulge and halo are spherical, the disk is not.

        NOTE(review): unlike the potential/gravity accessors, no self.radius
        offset is applied here — kept as in the original; confirm intended.
        """
        r = (x**2+y**2+z**2).sqrt()
        return self.bulge.mass_density(r)+self.disk.mass_density(x,y,z)+self.halo.mass_density(r)

    def circular_velocity(self,r):
        """Quadrature sum of component circular velocities (disk taken at z=0)."""
        return (self.bulge.circular_velocity(r)**2+self.disk.circular_velocity_at_z0(r)**2+self.halo.circular_velocity(r)**2).sqrt()

    def enclosed_mass(self,r):
        """Sum of component enclosed masses (disk via its in-plane equivalent)."""
        return self.bulge.enclosed_mass(r)+self.disk.equivalent_enclosed_mass_in_plane(r)+self.halo.enclosed_mass(r)
| |
# -*- coding: utf-8 -*-
"""
Created on Wed March 5th 2014
@author: Chad
"""
import scipy as sp #This brings in scipy - whenever I type sp in the program I would otherwise need to type scipy
from scipy import integrate
from matplotlib import pyplot as p #plotting package - the "as p" allows us to just type p instead of pyplot while configuring the plot settings
import time
import random as rand
rand.seed()
def Norris(t):
    """Norris-style pulse profile A * exp(ksai * (t/tau + tau/t)).

    Returns 0.0 at t == 0 (the expression is singular there); peaks at t == tau.
    """
    # FIX: sp.exp was a deprecated numpy alias removed in SciPy >= 1.12
    from math import exp
    A = 44          # 285
    tau = 1.28      # 14.74
    ksai = -1.0     # -1.3862943
    if t != 0.0:
        return A * exp(ksai * (t / tau + tau / t))
    return 0.0
def Dn(z,n):
    """Generalized distance integral (c/H0) * int_0^z (1+z')**n / E(z') dz'
    for a flat universe with E(z) = sqrt(OM*(1+z)**3 + OL).

    :param z: redshift (upper integration limit)
    :param n: power of (1+z) in the integrand (n=0 gives the comoving distance)
    :returns: distance in km (c in km/s divided by H0 in Hz)
    """
    # FIX: sp.sqrt was a deprecated numpy alias removed in SciPy >= 1.12
    from math import sqrt
    # constants                       # units
    c = 2.99792458*10**5              # km/s
    Ho = 2.175*10**-18                # 67.11 (km/s)/Mpc ---> Hz
    OM = 0.3175                       # matter density parameter
    OL = 0.6825                       # dark-energy density parameter
    dist = lambda red: ((1+red)**n)/(sqrt(OM*((1+red)**3)+OL))
    integ = integrate.quad(dist, 0.0, z)
    return (c/Ho)*integ[0]
def deltas(t, E):
    """For every sliding window of three consecutive events, return the
    window's time span (t[i+2] - t[i]) and its energy spread (max - min).

    Returns a pair of lists ([], []) when fewer than three events are given.
    """
    windows = range(len(t) - 2)
    dt = [t[i + 2] - t[i] for i in windows]
    dE = [max(E[i:i + 3]) - min(E[i:i + 3]) for i in windows]
    return dt, dE
def randE(t,E):
    """Draw len(t) energies uniformly at random from E, without replacement
    (index-wise: duplicate values in E can both be drawn).

    FIX: replaces the original randint-with-rejection loop, which was
    quadratic when len(t) ~ len(E) and hung forever when len(t) > len(E).
    """
    if len(t) > len(E):
        raise ValueError('randE: need at least as many energies as times')
    return [E[i] for i in rand.sample(range(len(E)), len(t))]
def Planck(deltat, scale):
    """Energy threshold of a Planck line for a time separation deltat.

    Returns deltat * (Mp * c) / (scale * D1): scale = 1, 0.1, 0.01 selects
    the 1x, 10x and 100x Planck-scale lines.
    """
    c = 2.99792458 * 10 ** 5          # km/s
    Mp = 1.2209 * (10 ** 22)          # Planck mass, MeV/c**2
    redshift = .903                   # =0.34  (D1 below was precomputed from it)
    Order = 1                         # order of the dispersion term
    Qgrav = Mp * c                    # quantum-gravity energy scale
    D1 = 1.37738149628 * 10 ** 23     # precomputed Dn(redshift, Order)
    #print D1
    #file=open('distance.txt','w')
    #file.write(str(D1))
    #file.close()
    per_unit_time = Qgrav / (scale * D1)
    return deltat * per_unit_time
# ---- Load the photon event list and select by energy (Python 2 script) ----
EEn , tti, ty, = sp.loadtxt('100MEV10DEGEVENTS.txt',unpack = True,skiprows=3)
ttti=[]
EEEn=[]
mev=100.0 ################### SELECT ENERGIES
mevlim=10000000.0
for i in range(len(tti)):
    if EEn[i]>mev:
        if EEn[i]<mevlim:
            EEEn.append(EEn[i])
            ttti.append(tti[i])
# ---- Keep only photons in the 0-3 s window after the chosen start time ----
En=[]
ti=[]
starttime=tti[0]+1376.0 #955.0 ################## GRABBING TIMES 0-3s
for i in range(len(ttti)):
    if ((ttti[i]-starttime)>0.0):
        if (ttti[i]-starttime<3.0): #50.0
            ti.append(ttti[i]-starttime)
            En.append(EEEn[i])
dt,dE=deltas(ti,En) ################## FINDING REAL dt AND dE UNDER CURVE
# realp[j] counts real (dt, dE) pairs above the 10**j Planck line
realp=[0.0,0.0,0.0]
for i in range(len(dE)):
    deet=dt[i]
    for j in range(3):
        scale=1.0*10.0**(-1*j)
        if dE[i]>Planck(deet,scale):
            realp[j]+=1.0
# coup[k] counts real photon "couples" closer than 0.1 / 10**k seconds
coup=[0,0,0,0]
for counting in range(4): ################## COUNTING COUPLES IN DATA
    threshold=0.10/(10**(counting)) #start at 10s
    for i in range(len(dt)):
        if (dt[i])<threshold:
            coup[counting]+=1.0
pwin=[0.0,0.0,0.0]
stopwatch=time.time() ################## START STOPWATCH
lastyay=1001.0
yay=[0,0,0,0]
nay=[0,0,0,0]
lastcoup=0.0
PHOTONCOUNT=float(len(ti))
#print ti
pwin=[0.0,0.0,0.0]
#print '------REAL TIMES ABOVE------'
# ---- Monte Carlo: generate fake bursts and compare against the real one ----
iters = 5000000
for it in range(iters):
    couple=[0,0,0,0]
    faket=[]
    # rejection-sample photon arrival times under the Norris pulse shape
    while(len(faket)!=PHOTONCOUNT): ################## GENERATE FAKE PHOTONS
        phot=rand.uniform(0.0,6.0) #18.0
        tim=rand.uniform(0.0,3.0) #50.0
        if Norris(tim)>phot:
            faket.append(tim)
    faket.sort() ################## SORTING FAKE PHOTONS
    fakeE=randE(faket,En) ################## PULLING RANDOM ENERGIES - only under curve or from whole energy set?
    fakedt , fakedE = deltas(faket,fakeE) ################## FINDING FAKE dt AND dE's
    planck=[0.0,0.0,0.0]
    for i in range(len(fakedt)):
        fakedeet=fakedt[i]
        for j in range(3):
            scale=1.0*10.0**(-1*j)
            if fakedE[i]>Planck(fakedeet,scale):
                planck[j]+=1.0
    # pwin[j]: how often a fake burst beats the real above-line pair count
    for i in range(len(planck)):
        if planck[i]>realp[i]:
            pwin[i]+=1.0
            #print 'Success at ',str(10**i),'th of the Planck Scale'
    # yay[k]: how often a fake burst has at least as many close couples
    for counting in range(4):
        threshold=0.10/(10**(counting))
        for i in range(len(fakedt)):
            if fakedt[i]<threshold:
                couple[counting]+=1.0
        if couple[counting]>=coup[counting]:
            yay[counting]+=1.0
print coup
#print couple
print yay
print pwin
# ---- Write the summary file ----
filename='090510MonteCarlofor'+str(mev)+'MeV.txt'################ SETUP FILE
file=open(filename,'w')
file.write('Real Data Couples below 0.1, 0.01, 0.001, 0.0001: '+str(coup)+'\n')
file.write('Couple Successes out of '+str(iters)+': '+str(yay)+'\n')
file.write('Real Pairs above 1,10,100 Planck lines '+str(realp)+'\n')
file.write('Successes above 1,10,100 Planck lines '+str(pwin)+'\n')
file.close()
print time.time()-stopwatch
#p.scatter(dt,dE,marker='x')
#p.yscale('log')
#p.xscale('log')
#p.title('Photons under Norris Curve for Energies>'+str(mev)+'MeV')
#p.xlim(10**-5,10**0)
#p.ylim(10**1,10**5)
| |
# dynamic.py
# Copyright (C) the SQLAlchemy authors and contributors
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from sqlalchemy import log, util
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import exc as sa_exc
from sqlalchemy.sql import operators
from sqlalchemy.orm import (
attributes, object_session, util as mapperutil, strategies, object_mapper
)
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.util import _state_has_identity, has_identity
from sqlalchemy.orm import attributes, collections
class DynaLoader(strategies.AbstractRelationLoader):
    """Relation loader strategy backing 'dynamic' collections: installs a
    DynamicAttributeImpl so collection access goes through a Query."""

    def init_class_attribute(self, mapper):
        self.is_class_level = True
        strategies._register_attribute(self,
            mapper,
            useobject=True,
            impl_class=DynamicAttributeImpl,
            target_mapper=self.parent_property.mapper,
            order_by=self.parent_property.order_by,
            query_class=self.parent_property.query_class
        )

    def create_row_processor(self, selectcontext, path, mapper, row, adapter):
        # dynamic relations populate nothing per-row; reads go through the Query
        return (None, None)

log.class_logger(DynaLoader)
class DynamicAttributeImpl(attributes.AttributeImpl):
    """Attribute implementation for dynamic collections.

    Reads return a Query-producing object (query_class); mutations are
    recorded in a per-state CollectionHistory rather than a local list.
    """
    uses_objects = True
    accepts_scalar_loader = False

    def __init__(self, class_, key, typecallable,
                 target_mapper, order_by, query_class=None, **kwargs):
        super(DynamicAttributeImpl, self).__init__(class_, key, typecallable, **kwargs)
        self.target_mapper = target_mapper
        self.order_by = order_by
        # ensure the query class carries AppenderMixin behavior
        if not query_class:
            self.query_class = AppenderQuery
        elif AppenderMixin in query_class.mro():
            self.query_class = query_class
        else:
            self.query_class = mixin_user_query(query_class)

    def get(self, state, dict_, passive=False):
        if passive:
            # passive get: only the pending (added) items, no SQL
            return self._get_collection_history(state, passive=True).added_items
        else:
            return self.query_class(self, state)

    def get_collection(self, state, dict_, user_data=None, passive=True):
        if passive:
            return self._get_collection_history(state, passive=passive).added_items
        else:
            history = self._get_collection_history(state, passive=passive)
            return history.added_items + history.unchanged_items

    def fire_append_event(self, state, dict_, value, initiator):
        collection_history = self._modified_event(state, dict_)
        collection_history.added_items.append(value)

        for ext in self.extensions:
            ext.append(state, value, initiator or self)

        if self.trackparent and value is not None:
            self.sethasparent(attributes.instance_state(value), True)

    def fire_remove_event(self, state, dict_, value, initiator):
        collection_history = self._modified_event(state, dict_)
        collection_history.deleted_items.append(value)

        if self.trackparent and value is not None:
            self.sethasparent(attributes.instance_state(value), False)

        for ext in self.extensions:
            ext.remove(state, value, initiator or self)

    def _modified_event(self, state, dict_):
        # Lazily create this attribute's CollectionHistory and flag the
        # state as modified; returns the history for the caller to mutate.
        if self.key not in state.committed_state:
            state.committed_state[self.key] = CollectionHistory(self, state)

        state.modified_event(dict_,
                             self,
                             False,
                             attributes.NEVER_SET,
                             passive=attributes.PASSIVE_NO_INITIALIZE)

        # this is a hack to allow the _base.ComparableEntity fixture
        # to work
        dict_[self.key] = True
        return state.committed_state[self.key]

    def set(self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF):
        # ignore events re-fired by our own append/remove to avoid recursion
        if initiator is self:
            return

        self._set_iterable(state, dict_, value)

    def _set_iterable(self, state, dict_, iterable, adapter=None):
        # Bulk-replace the collection: diff old vs. new through the adapters.
        collection_history = self._modified_event(state, dict_)
        new_values = list(iterable)

        # only load the old collection if the parent has a database identity
        if _state_has_identity(state):
            old_collection = list(self.get(state, dict_))
        else:
            old_collection = []

        collections.bulk_replace(new_values, DynCollectionAdapter(self, state, old_collection), DynCollectionAdapter(self, state, new_values))

    def delete(self, *args, **kwargs):
        raise NotImplementedError()

    def get_history(self, state, dict_, passive=False):
        c = self._get_collection_history(state, passive)
        return attributes.History(c.added_items, c.unchanged_items, c.deleted_items)

    def _get_collection_history(self, state, passive=False):
        if self.key in state.committed_state:
            c = state.committed_state[self.key]
        else:
            c = CollectionHistory(self, state)

        if not passive:
            # non-passive: materialize unchanged items too (may emit SQL)
            return CollectionHistory(self, state, apply_to=c)
        else:
            return c

    def append(self, state, dict_, value, initiator, passive=False):
        if initiator is not self:
            self.fire_append_event(state, dict_, value, initiator)

    def remove(self, state, dict_, value, initiator, passive=False):
        if initiator is not self:
            self.fire_remove_event(state, dict_, value, initiator)
class DynCollectionAdapter(object):
    """the dynamic analogue to orm.collections.CollectionAdapter

    Wraps a plain list so collections.bulk_replace can diff old/new
    membership and fire events through the owning attribute.
    """

    def __init__(self, attr, owner_state, data):
        self.attr = attr          # the DynamicAttributeImpl
        self.state = owner_state  # InstanceState of the parent object
        self.data = data          # plain list snapshot being adapted

    def __iter__(self):
        return iter(self.data)

    def append_with_event(self, item, initiator=None):
        self.attr.append(self.state, self.state.dict, item, initiator)

    def remove_with_event(self, item, initiator=None):
        self.attr.remove(self.state, self.state.dict, item, initiator)

    def append_without_event(self, item):
        # eventless mutation is a no-op: there is no local backing collection
        pass

    def remove_without_event(self, item):
        pass
class AppenderMixin(object):
    """Mixin layering dynamic-collection behavior over a Query class.

    An instance is the collection for one parent object: reads run a Query
    filtered to that parent; append/remove route through the attribute's
    event methods so history is recorded.
    """
    query_class = None

    def __init__(self, attr, state):
        Query.__init__(self, attr.target_mapper, None)
        self.instance = instance = state.obj()
        self.attr = attr

        mapper = object_mapper(instance)
        prop = mapper.get_property(self.attr.key, resolve_synonyms=True)
        # criterion linking child rows to this specific parent instance
        self._criterion = prop.compare(
            operators.eq,
            instance,
            value_is_parent=True,
            alias_secondary=False)

        if self.attr.order_by:
            self._order_by = self.attr.order_by

    def __session(self):
        # Session to query against, or None when the parent has no identity.
        sess = object_session(self.instance)
        if sess is not None and self.autoflush and sess.autoflush and self.instance in sess:
            sess.flush()
        if not has_identity(self.instance):
            return None
        else:
            return sess

    def session(self):
        return self.__session()
    session = property(session, lambda s, x: None)  # read-only; setter is a no-op

    def __iter__(self):
        sess = self.__session()
        if sess is None:
            # detached/pending parent: iterate only the pending added items
            return iter(self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                passive=True).added_items)
        else:
            return iter(self._clone(sess))

    def __getitem__(self, index):
        sess = self.__session()
        if sess is None:
            return self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                passive=True).added_items.__getitem__(index)
        else:
            return self._clone(sess).__getitem__(index)

    def count(self):
        sess = self.__session()
        if sess is None:
            return len(self.attr._get_collection_history(
                attributes.instance_state(self.instance),
                passive=True).added_items)
        else:
            return self._clone(sess).count()

    def _clone(self, sess=None):
        # note we're returning an entirely new Query class instance
        # here without any assignment capabilities; the class of this
        # query is determined by the session.
        instance = self.instance
        if sess is None:
            sess = object_session(instance)
            if sess is None:
                # BUG FIX: this raised via 'orm_exc', a name never imported in
                # this module (NameError); sa_exc is bound to sqlalchemy.orm.exc
                # by the imports above and provides DetachedInstanceError.
                raise sa_exc.DetachedInstanceError(
                    "Parent instance %s is not bound to a Session, and no "
                    "contextual session is established; lazy load operation "
                    "of attribute '%s' cannot proceed" % (
                        mapperutil.instance_str(instance), self.attr.key))

        if self.query_class:
            query = self.query_class(self.attr.target_mapper, session=sess)
        else:
            query = sess.query(self.attr.target_mapper)

        query._criterion = self._criterion
        query._order_by = self._order_by

        return query

    def append(self, item):
        """Add *item* to the collection via the attribute's append event."""
        self.attr.append(
            attributes.instance_state(self.instance),
            attributes.instance_dict(self.instance), item, None)

    def remove(self, item):
        """Remove *item* from the collection via the attribute's remove event."""
        self.attr.remove(
            attributes.instance_state(self.instance),
            attributes.instance_dict(self.instance), item, None)
class AppenderQuery(AppenderMixin, Query):
    """A dynamic query that supports basic collection storage operations.

    Default query_class for dynamic attributes: plain Query plus the
    AppenderMixin append/remove and session-aware iteration behavior.
    """
def mixin_user_query(cls):
    """Return a new class with AppenderQuery functionality layered over.

    The generated class is named 'Appender<cls>' and keeps the original
    class available as its query_class attribute.
    """
    return type('Appender' + cls.__name__, (AppenderMixin, cls), {'query_class': cls})
class CollectionHistory(object):
    """Overrides AttributeHistory to receive append/remove events directly."""

    def __init__(self, attr, state, apply_to=None):
        if apply_to:
            # "materialized" view of a pending history: load the current
            # collection and drop everything recorded as deleted, comparing
            # by object identity.
            deleted = util.IdentitySet(apply_to.deleted_items)
            added = apply_to.added_items  # NOTE(review): unused local, kept as-is
            coll = AppenderQuery(attr, state).autoflush(False)
            self.unchanged_items = [o for o in util.IdentitySet(coll) if o not in deleted]
            # added/deleted lists are shared (aliased) with the source history
            self.added_items = apply_to.added_items
            self.deleted_items = apply_to.deleted_items
        else:
            self.deleted_items = []
            self.added_items = []
            self.unchanged_items = []
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Check for stylistic and formal issues in .rst and .py
# files included in the documentation.
#
# 01/2009, Georg Brandl
# TODO: - wrong versions in versionadded/changed
# - wrong markup after versionchanged directive
from __future__ import with_statement
import os
import re
import sys
import getopt
from os.path import join, splitext, abspath, exists
from collections import defaultdict
# Known reST directive names; anything else that looks like a directive is
# probably a typo'd comment.
directives = [
    # standard docutils ones
    'admonition', 'attention', 'caution', 'class', 'compound', 'container',
    'contents', 'csv-table', 'danger', 'date', 'default-role', 'epigraph',
    'error', 'figure', 'footer', 'header', 'highlights', 'hint', 'image',
    'important', 'include', 'line-block', 'list-table', 'meta', 'note',
    'parsed-literal', 'pull-quote', 'raw', 'replace',
    'restructuredtext-test-directive', 'role', 'rubric', 'sectnum', 'sidebar',
    'table', 'target-notes', 'tip', 'title', 'topic', 'unicode', 'warning',
    # Sphinx and Python docs custom ones
    'acks', 'attribute', 'autoattribute', 'autoclass', 'autodata',
    'autoexception', 'autofunction', 'automethod', 'automodule', 'centered',
    'cfunction', 'class', 'classmethod', 'cmacro', 'cmdoption', 'cmember',
    'code-block', 'confval', 'cssclass', 'ctype', 'currentmodule', 'cvar',
    'data', 'decorator', 'decoratormethod', 'deprecated-removed',
    'deprecated(?!-removed)', 'describe', 'directive', 'doctest', 'envvar',
    'event', 'exception', 'function', 'glossary', 'highlight', 'highlightlang',
    'impl-detail', 'index', 'literalinclude', 'method', 'miscnews', 'module',
    'moduleauthor', 'opcode', 'pdbcommand', 'productionlist',
    'program', 'role', 'sectionauthor', 'seealso', 'sourcecode', 'staticmethod',
    'tabularcolumns', 'testcode', 'testoutput', 'testsetup', 'toctree', 'todo',
    'todolist', 'versionadded', 'versionchanged'
]

all_directives = '(' + '|'.join(directives) + ')'
# ".. name" with a single colon (or none) — a directive mistyped as a comment
seems_directive_re = re.compile(r'(?<!\.)\.\. %s([^a-z:]|:(?!:))' % all_directives)
# bare `text` (default role) instead of an explicit role or ``literal``
default_role_re = re.compile(r'(^| )`\w([^`]*?\w)?`($| )')
# reST markup fragments that survived into built HTML output
leaked_markup_re = re.compile(r'[a-z]::\s|`|\.\.\s*\w+:')
# Registry mapping a file suffix to the list of checker functions for it.
checkers = {}

# Default attributes attached to every checker function.
checker_props = {'severity': 1, 'falsepositives': False}

def checker(*suffixes, **kwds):
    """Decorator to register a function as a checker for the given suffixes."""
    def deco(func):
        # register under every suffix this checker handles
        for suffix in suffixes:
            checkers.setdefault(suffix, []).append(func)
        # attach checker properties, falling back to the defaults
        for prop, default in checker_props.items():
            setattr(func, prop, kwds.get(prop, default))
        return func
    return deco
@checker('.py', severity=4)
def check_syntax(fn, lines):
    """Check Python examples for valid syntax."""
    code = ''.join(lines)
    if '\r' in code:
        # carriage returns are only expected on Windows checkouts
        if os.name != 'nt':
            yield 0, '\\r in code file'
        code = code.replace('\r', '')
    try:
        compile(code, fn, 'exec')
    except SyntaxError as err:
        yield err.lineno, 'not compilable: %s' % err
@checker('.rst', severity=2)
def check_suspicious_constructs(fn, lines):
    """Check for suspicious reST constructs."""
    inprod = False
    for lno, line in enumerate(lines, start=1):
        if seems_directive_re.search(line):
            yield lno, 'comment seems to be intended as a directive'
        if '.. productionlist::' in line:
            # default-role checks are suspended inside production lists
            inprod = True
        elif not inprod and default_role_re.search(line):
            yield lno, 'default role used'
        elif inprod and not line.strip():
            # a blank line terminates the production list
            inprod = False
@checker('.py', '.rst')
def check_whitespace(fn, lines):
    """Check for whitespace and line length issues."""
    for lno, line in enumerate(lines, start=1):
        if '\r' in line:
            yield lno, '\\r in line'
        if '\t' in line:
            yield lno, 'OMG TABS!!!1'
        # drop the trailing newline before testing for trailing blanks
        body = line[:-1]
        if body.rstrip(' \t') != body:
            yield lno, 'trailing whitespace'
@checker('.rst', severity=0)
def check_line_length(fn, lines):
    """Check for line length; this checker is not run by default."""
    for lno, line in enumerate(lines, start=1):
        if len(line) <= 81:
            continue
        stripped = line.lstrip()
        # don't complain about tables, links and function signatures
        if (stripped[0] not in '+|' and
                'http://' not in line and
                not stripped.startswith(('.. function',
                                         '.. method',
                                         '.. cfunction'))):
            yield lno, "line too long"
@checker('.html', severity=2, falsepositives=True)
def check_leaked_markup(fn, lines):
    """Check HTML files for leaked reST markup; this only works if
    the HTML files have been built.
    """
    for lno, line in enumerate(lines, start=1):
        if leaked_markup_re.search(line):
            yield lno, 'possibly leaked markup: %r' % line
def main(argv):
    """Entry point: parse options, walk the tree, and run registered checkers.

    Returns 0 when no problems were found, non-zero otherwise.
    """
    usage = '''\
Usage: %s [-v] [-f] [-s sev] [-i path]* [path]
Options: -v verbose (print all checked file names)
-f enable checkers that yield many false positives
-s sev only show problems with severity >= sev
-i path ignore subdir or file path
'''% argv[0]
    try:
        gopts, args = getopt.getopt(argv[1:], 'vfs:i:')
    except getopt.GetoptError:
        print(usage)
        return 2

    verbose = False
    severity = 1       # minimum severity to report
    ignore = []        # absolute paths to skip
    falsepos = False   # run high-false-positive checkers too
    for opt, val in gopts:
        if opt == '-v':
            verbose = True
        elif opt == '-f':
            falsepos = True
        elif opt == '-s':
            severity = int(val)
        elif opt == '-i':
            ignore.append(abspath(val))

    if len(args) == 0:
        path = '.'
    elif len(args) == 1:
        path = args[0]
    else:
        print(usage)
        return 2

    if not exists(path):
        print('Error: path %s does not exist' % path)
        return 2

    # problem counts keyed by severity
    count = defaultdict(int)

    for root, dirs, files in os.walk(path):
        # ignore subdirs in ignore list
        if abspath(root) in ignore:
            del dirs[:]
            continue
        for fn in files:
            fn = join(root, fn)
            if fn[:2] == './':
                fn = fn[2:]
            # ignore files in ignore list
            if abspath(fn) in ignore:
                continue
            ext = splitext(fn)[1]
            checkerlist = checkers.get(ext, None)
            if not checkerlist:
                continue
            if verbose:
                print('Checking %s...' % fn)
            try:
                with open(fn, 'r', encoding='utf-8') as f:
                    lines = list(f)
            except (IOError, OSError) as err:
                print('%s: cannot open: %s' % (fn, err))
                count[4] += 1
                continue
            for checker in checkerlist:
                if checker.falsepositives and not falsepos:
                    continue
                csev = checker.severity
                if csev >= severity:
                    for lno, msg in checker(fn, lines):
                        print('[%d] %s:%d: %s' % (csev, fn, lno, msg))
                        count[csev] += 1
        if verbose:
            print()
    if not count:
        if severity > 1:
            print('No problems with severity >= %d found.' % severity)
        else:
            print('No problems found.')
    else:
        for severity in sorted(count):
            number = count[severity]
            print('%d problem%s with severity %d found.' %
                  (number, number > 1 and 's' or '', severity))
    return int(bool(count))

if __name__ == '__main__':
    sys.exit(main(sys.argv))
| |
from status_list import status_list, cookie_handler
from os import sep
from struct import pack, unpack
from sys import maxint as no_cookie
# Base directory for the clan rank icons.
dd = 'icons' + sep + 'clan' + sep

# Icon file per rank display name.
icon = {'Chieftain': dd + 'chieftain.gif',
        'Shaman': dd + 'shaman.gif',
        'Grunt': dd + 'grunt.gif',
        'Peon (>1 week)': dd + 'peon.gif',
        'Peon (<1 week)': dd + 'peon2.gif'}

# Rank display names, highest rank first.
ranks = ['Chieftain',
         'Shaman',
         'Grunt',
         'Peon (>1 week)',
         'Peon (<1 week)']

# One-letter rank abbreviations; the list index doubles as the numeric
# rank value sent on the wire (see set_rank).
rank_idx = ['c',
            's',
            'g',
            'p']

# Member status strings indexed by the server's status byte.
statuses = ['offline',
            'online',
            'in a channel',
            'in a public game',
            'in a private game']

# Server status codes for rank-change (0x7A) responses.
set_results = {0x00: 'Rank change was successful.',
               0x01: 'Rank change failed.',
               0x02: 'You cannot change that user\'s rank yet.',
               0x04: 'Rank change declined.',
               0x05: 'Rank change failed.',
               0x07: 'You are not authorized to change ranks because your rank is too low.',
               0x08: 'You are not authorized to change the specified user\'s rank because of his/her current rank.'}

# Status codes for invitation (0x77/0x79) responses.
invite_results = {0x00: 'Invitation accepted.',
                  0x04: 'Invitation declined.',
                  0x05: 'Failed to invite user.',
                  0x09: 'Invite failed because clan is full.'}

# Status codes for the candidate search (0x70) response.
find_cand_results={0x00: 'Successfully found candidates.',
                   0x01: 'Clan tag is already taken.',
                   0x02: 'Candidate search failed because bot CD key has been used to create a clan in past week.',
                   0x08: 'This account is already in a clan.',
                   0x0A: 'Invalid clan tag.'}

# Status codes for the clan-creation (0x71) response.
create_results = {0x00: 'Clan created successfully.',
                  0x04: 'Somebody declined the invite.',
                  0x05: 'An invited user was unavailable.'}
def create_tag(tag):
    """NUL-pad a clan tag to 4 characters, reverse it, and keep 4 bytes.

    Tags longer than 4 characters are effectively truncated to their last
    four characters (reversed).
    """
    padded = tag.ljust(4, '\0')
    return ''.join(reversed(padded))[:4]
def rank_change_result(status):
    """Map a rank-change status code to its message; unknown codes get a
    generic failure string."""
    return set_results.get(status, 'Rank change failed for an unknown reason.')
def format_tag(tag):
    """Reverse a clan tag and right-pad the result with NULs to 4 characters.

    Unlike create_tag, tags longer than 4 characters are not truncated.
    """
    flipped = ''.join(reversed(tag))
    return flipped + '\0' * (4 - len(flipped))
class __init__():
    def __init__(self, bot):
        """Wire the clan plugin into the bot's event system."""
        self.bot = bot
        self.invite = {}                   # pending invites keyed by lowercased clan tag
        self.responses = cookie_handler()  # maps request cookies back to issuing commands
        self.clanned = False               # True once clan membership is confirmed
        self.creating = False              # True while a clan-creation candidate list is open
        self.bot.events.add(self, 'ui', 1, 0,
                            'reload', self.ui_reload)
        self.bot.events.add(self, 'bot', 1, 0,
                            'disc', self.disc,
                            'connected', self.get_clan)
        #self.bot.events.add(self, 'ui', 'list', 'clan', 0, 0,
        #                    'selected', self.get_clan)
        # handlers for the BNCS clan packet family (0x70-0x82)
        self.bot.events.add(self, 'BNCSRecv', -1, 0,
                            0x70, self.recv_cand,
                            0x71, self.recv_mult_invite,
                            0x73, self.recv_disband,
                            0x74, self.recv_chief,
                            0x75, self.recv_info,
                            0x76, self.recv_quit,
                            0x77, self.recv_invite,
                            0x78, self.recv_set_rank,
                            0x79, self.recv_invited,
                            0x7A, self.recv_set_rank,
                            0x7C, self.recv_motd,
                            0x7D, self.recv_clan,
                            0x7E, self.recv_removed,
                            0x7F, self.recv_status,
                            0x81, self.recv_rank_change,
                            0x82, self.recv_member_info)
        self.bot.events.add(self, 'commands', 0, 0,
                            'start', self.add_commands)
        self.add_commands()
    def ui_reload(self):
        # Rebuild the clan list UI after a front-end reload.
        self.ui_start()
        self.reset_list()

    def in_clan(self):
        """Called once clan membership is confirmed; create the roster list."""
        self.clanned = True
        self.clan = status_list(self.bot, 'clan')
        self.bot.clan = self.clan
        self.ui_start()
def out_clan(self):
if self.clanned:
self.clanned = False
self.clan.clear()
try:
del self.clan
del self.bot.clan
except NameError:
pass
self.bot.events.call('ui', 'list', 'remove', ['clan'])
self.bot.events.call('ui', 'menu', 'remove', ['clan'])
    def clear(self):
        """Empty the clan roster and its UI list."""
        self.clan.clear()
        self.clan.online = 0
        self.bot.events.call('ui', 'list', 'clan', 'clear')
    def ui_start(self):
        """Create the clan list columns, the Clan menu, and the per-row menu."""
        self.bot.events.call('ui', 'list', 'add',
                             ['list', 'clan', 'Clan', 31, 19,
                              'Icon', 2, 39,
                              'Username', 0, 135,
                              'Location', 0, 100], icon)
        self.bot.events.call('ui', 'menu', 'add',
                             ['Clan',
                              'Get MOTD', self.get_motd,
                              'Leave Clan', self.remove_self,
                              'Disband', self.get_disband])
        # context menu on individual roster rows
        self.bot.events.call('ui', 'list', 'clan', 'menu',
                             ['Make Chieftain', self.set_rank_chieftain,
                              'Make Shaman', self.set_rank_shaman,
                              'Make Grunt', self.set_rank_grunt,
                              'Make Peon', self.set_rank_peon,
                              'Remove', self.remove_user,
                              'Profile', self.get_profile])
        self.tab = True

    def add_commands(self):
        # Register the chat commands handled by this plugin.
        self.bot.events.add(self, 'command', 0, 0,
                            'motd', self.get_motd,
                            'setmotd', self.set_motd,
                            'accept', self.get_accept,
                            'cwhois', self.get_member_info,
                            'setrank', self.set_rank,
                            'invite', self.invite_user,
                            'makeclan', self.find_cand)
    def clan_user(self):
        # Pop the next selected row index and resolve it to a member name.
        return self.clan.name_from_idx(self.bot.status['selected'].pop())

    def invite_user(self, rest):
        """Invite user(s): from the UI selection (list) or a typed command (dict)."""
        if type(rest) == list:
            for x in range(len(self.bot.status['selected'])):
                self.bot.BNCS.insert_long(no_cookie)
                self.bot.BNCS.insert_string(self.clan_user())
                self.bot.BNCS.BNCSsend(0x77)
        else:
            self.bot.BNCS.insert_long(self.responses.add(rest))
            self.bot.BNCS.insert_string(rest['arg'])
            self.bot.BNCS.BNCSsend(0x77)

    def remove_self(self, *rest):
        """Leave the clan ourselves, after confirmation."""
        self.remove = self.bot.status['username']
        self.bot.confirm('Quit Clan',
                         'Are you sure you want to leave the clan?',
                         self.really_remove_user)

    def remove_user(self, *rest):
        """Remove each selected member, after confirmation."""
        for x in range(len(self.bot.status['selected'])):
            self.remove = self.clan_user()
            self.bot.confirm('Remove User', 'Are you sure you want to remove ' +
                             self.remove + ' from the clan?',
                             self.really_remove_user)

    def really_remove_user(self):
        # Confirmed: send the removal (0x78) packet for the pending name.
        self.bot.BNCS.insert_long(no_cookie)
        self.bot.BNCS.insert_string(self.remove)
        self.bot.BNCS.BNCSsend(0x78)
        del self.remove
    def set_rank_peon(self, *rest):
        # Menu: set every selected member to Peon (0x01).
        for x in range(len(self.bot.status['selected'])):
            self.set_rank(self.clan_user(), 0x01)

    def set_rank_grunt(self, *rest):
        # Menu: set every selected member to Grunt (0x02).
        for x in range(len(self.bot.status['selected'])):
            self.set_rank(self.clan_user(), 0x02)

    def set_rank_shaman(self, *rest):
        # Menu: set every selected member to Shaman (0x03).
        for x in range(len(self.bot.status['selected'])):
            self.set_rank(self.clan_user(), 0x03)

    def set_rank_chieftain(self, *rest):
        # Chieftain transfer is irreversible for us; confirm first.
        self.bot.confirm('Give Chieftain',
                         'Are you sure you want to give up chieftain to this user?',
                         self.really_set_rank_chief_from_menu)

    def really_set_rank_chief_from_menu(self):
        self.really_set_rank_chief(self.clan_user(),
                                   no_cookie)

    def really_set_rank_chief(self, un, cookie):
        # 0x74: hand the chieftain position to 'un'.
        self.bot.BNCS.insert_long(cookie)
        self.bot.BNCS.insert_string(un)
        self.bot.BNCS.BNCSsend(0x74)

    def set_rank(self, rest, rank=0):
        """Set a member's rank, from the UI (str + rank) or the setrank
        command (dict whose arg is '<user> <rank>')."""
        if type(rest) == str:
            self.bot.BNCS.insert_long(no_cookie)
            self.bot.BNCS.insert_string(rest)
        else:
            user, rank = rest['arg'].split(' ', 1)
            try:
                rank = int(rank)
            except ValueError:
                # accept rank names by their first letter (see rank_idx)
                try:
                    rank = rank_idx.index(rank[0].lower())
                except ValueError:
                    self.bot.respond(rest, 'No such rank')
                    return
            if rank == 0x00:
                # rank 0 is chieftain: handled by the dedicated transfer packet
                self.really_set_rank_chief(user, self.responses.add(rest))
                return
            self.bot.BNCS.insert_long(self.responses.add(rest))
            self.bot.BNCS.insert_string(user)
        self.bot.BNCS.insert_byte(rank)
        self.bot.BNCS.BNCSsend(0x7A)
    def set_motd(self, cmd):
        """setmotd command: set the clan message of the day (0x7B)."""
        self.bot.BNCS.insert_long(0)
        self.bot.BNCS.insert_string(cmd['arg'])
        self.bot.BNCS.BNCSsend(0x7B)
        self.bot.respond(cmd, 'New message of the day set.')

    def get_member_info(self, cmd):
        """cwhois command: request member info for '<tag> <name>' (0x82)."""
        tag, name = cmd['arg'].strip().split(' ', 1)
        cmd['arg'] = {'name': name,
                      'tag': tag}
        name = name.lower()
        tag = format_tag(tag)
        self.bot.BNCS.insert_long(self.responses.add(cmd))
        self.bot.BNCS.insert_raw(tag)
        self.bot.BNCS.insert_string(name)
        self.bot.BNCS.BNCSsend(0x82)

    def get_accept(self, cmd):
        """accept command: accept a previously recorded invite by clan tag."""
        tag = cmd['arg'].strip().lower()
        try:
            info = self.invite[tag]
        except KeyError:
            self.bot.respond(cmd, 'There is no record of an invite from Clan ' +\
                             cmd['arg'] + '.')
            return
        self.bot.BNCS.insert_long(info[1]) #Cookie
        self.bot.BNCS.insert_long(info[4]) #Clan tag as DWORD
        self.bot.BNCS.insert_string(info[3]) #Inviter
        self.bot.BNCS.insert_byte(0x06) #Accept code
        self.bot.BNCS.BNCSsend(info[0])
        self.bot.respond(cmd, 'Invitation to Clan ' + info[2] + ' accepted.')

    def disc(self):
        """Connection lost: drop pending invites and tear down the clan UI."""
        #self.clear()
        self.invite = {}
        self.out_clan()
    def get_profile(self, *rest):
        # Request a profile for each selected clan member.
        for x in range(len(self.bot.status['selected'])):
            self.bot.events.call('profile', 'request',
                                 [self.clan_user()])

    def get_motd(self, *rest):
        """motd command / menu item: request the clan message of the day (0x7C)."""
        if type(rest[0]) == dict:
            # invoked as a chat command: track the cookie for the response
            self.bot.BNCS.insert_long(self.responses.add(rest[0]))
        else:
            self.bot.BNCS.insert_long(no_cookie)
        self.bot.BNCS.BNCSsend(0x7C)

    def get_disband(self, *rest):
        # Disbanding is destructive; confirm first.
        self.bot.confirm('Disband Clan',
                         'Do you really want to disband the clan?',
                         self.get_really_disband)

    def get_really_disband(self, *rest):
        # Confirmed: send the disband (0x73) packet.
        self.bot.BNCS.insert_long(0)
        self.bot.BNCS.BNCSsend(0x73)

    def pick_color(self, online):
        # Green for online members, red for offline.
        if online:
            return '#00FF00'
        else:
            return '#FF0000'

    def get_clan(self, username=''):
        """On connect: request clan membership info (WAR3/W3XP products only)."""
        if (self.bot.config['login']['product'] in ['WAR3', 'W3XP']) == False:
            return
        self.bot.BNCS.insert_long(0)
        self.bot.BNCS.BNCSsend(0x7D)
    def find_cand(self, cmd):
        """makeclan command: search for clan-creation candidates (0x70)."""
        if self.creating:
            # restarting an in-progress creation: clear the old candidate list
            self.cand.clear()
            self.bot.events.call('ui', 'list', 'cand', 'clear')
        self.bot.BNCS.insert_long(self.responses.add(cmd))
        self.bot.BNCS.insert_raw(create_tag(cmd['arg'].split(' ', 1)[0]))
        self.bot.BNCS.BNCSsend(0x70)

    def create_invite(self, *rest):
        """Send clan-creation invitations (0x71) to 9 selected candidates."""
        if self.creating == False:
            self.bot.addchat('error', 'Clan creation not enabled.')
            return
        if len(self.bot.status['selected']) < 9:
            self.bot.addchat('error', 'Not enough users selected.')
            return
        self.bot.BNCS.insert_long(no_cookie)
        self.bot.BNCS.insert_string(self.cand.name)
        self.bot.BNCS.insert_raw(create_tag(self.cand.tag))
        self.bot.BNCS.insert_byte(9)
        for x in self.bot.status['selected'][:9]:
            self.bot.BNCS.insert_string(self.cand.name_from_idx(x))
        self.bot.BNCS.BNCSsend(0x71)
        self.bot.addchat('clan', 'Invites sent.')
def recv_mult_invite(self, packet):
cookie, result = unpack('<LB', packet['data'][:5])
users = packet['data'][5:].split('\0')[:-1]
self.out_cand()
self.bot.addchat('clan', create_results[result])
if users != []:
build = users[0]
if len(user) > 1:
for user in users[1:-1]:
build += ', ' + users
build += ', and ' + users[-1]
self.bot.addchat('error', 'Invites for these user(s) failed: ' + build)
    def recv_cand(self, packet):
        """Handle the candidate-search response (for find_cand).

        Packet layout: cookie (DWORD), status byte, count byte, then one
        NUL-terminated username per candidate.
        """
        cookie, status, num = unpack('<L2B', packet['data'][:6])
        users = packet['data'][6:].split('\0')[:-1]
        users.sort()
        # Match this response back to the command that requested it.
        cmd = self.responses.pop(cookie)
        if num == 0 and status == 0x00:
            self.bot.respond(cmd, 'No candidates found.')
            return
        self.bot.respond(cmd, find_cand_results[status])
        # Start a fresh candidate roster for the requested tag and clan name.
        self.cand = status_list(self.bot, 'cand')
        self.cand.tag, self.cand.name = cmd['arg'].split(' ', 1)
        if self.creating == False:
            # First search: create the candidate list UI and its context menu.
            self.bot.events.call('ui', 'list', 'add',
                                 ['list', 'cand', 'Candidates', 31, 19,
                                  'Username', 0, 175,
                                  'Type', 0, 75], {})
            self.bot.events.call('ui', 'list', 'cand', 'menu',
                                 ['Invite', self.create_invite])
            self.creating = True
        for user in users:
            # Candidates are labeled by where they were found: the current
            # channel or the friends list.
            if self.bot.channel.has_user(user):
                info = {'username': user,
                        'type': 'Channel'}
            else:
                info = {'username': user,
                        'type': 'Friend'}
            self.cand.add_user(user, info, -1)
        self.reset_cand_list()
def update_cand_header(self):
self.bot.events.call('ui', 'list', 'cand', 'header',
['Clan ' + self.cand.tag + ' (' +\
str(self.cand.count) + ')'])
def reset_cand_list(self):
self.bot.events.call('ui', 'list', 'cand', 'clear')
for x in self.cand.order:
self.bot.events.call('ui', 'list', 'cand', 'add_entry',
[self.cand.user[x]['username'],
self.cand.user[x]['type']],
{'color': '#FFFFFF'})
self.update_cand_header()
def out_cand(self):
if self.creating:
self.creating = False
try:
del self.cand
except NameError:
pass
self.bot.events.call('ui', 'list', 'remove', ['cand'])
def recv_make_invited(self, packet):
cookie, tag_long = unpack('<2L', packet['data'][:8])
tag = pack('>L', tag_long).strip('\0 ')
rest = packet['data'][8:].split('\0', 2)
clan_name = rest[0]
inviter = rest[1]
num_invited = ord(rest[2][0])
users = rest[2][1].split('\0')[:-1]
self.bot.addchat('clan', 'You have been invited to Clan ' +\
clan_name + ' (' + tag + ') by ' + inviter)
if num_invited != 0:
self.bot.addchat('clan', str(num_invited) + ' others were invited: ' +\
str(users))
self.bot.addchat('clan', 'Type "/accept ' + tag + '" to accept this invitation.')
self.invite[tag.lower()] = [packet['id'],
cookie, tag, inviter, tag_long]
def recv_invited(self, packet):
cookie, tag_long = unpack('<2L', packet['data'][:8])
tag = pack('>L', tag_long).strip('\0 ')
rest = packet['data'][8:].split('\0', 1)
clan_name = rest[0]
inviter = rest[1][:-1]
self.bot.addchat('clan', 'You have been invited to Clan ' +\
clan_name + ' (' + tag + ') by ' + inviter)
self.bot.addchat('clan', 'Type "/accept ' + tag + '" to accept this invitation.')
self.invite[tag.lower()] = [packet['id'],
cookie, tag, inviter, tag_long]
def recv_invite(self, packet):
cookie, result = unpack('<LB', packet['data'][:5])
try:
cmd = self.responses.pop(cookie)
except KeyError:
self.bot.addchat('clan', invite_results[result])
else:
self.bot.respond(cmd, invite_results[result])
    def recv_info(self, packet):
        """Handle the clan-info packet: our rank byte plus the clan tag."""
        # Ranks on the wire are inverted relative to the local `ranks` table.
        rank = ranks[4 - ord(packet['data'][5])]
        # NOTE(review): '<L' requires exactly 4 bytes, so this assumes
        # packet['data'][1:-1] is 4 bytes (a 6-byte payload) — confirm
        # against the protocol; a [1:5] slice would be less fragile.
        clan = pack('>L', unpack('<L', packet['data'][1:-1])[0]).strip('\0 ')
        self.bot.addchat('clan', 'You are a ' + rank + ' in Clan ' + clan + '.')
        self.in_clan()
        self.clan.tag = clan
def recv_motd(self, packet):
cookie = unpack('<L', packet['data'][:4])[0]
try:
cmd = self.responses.pop(cookie)
except KeyError:
self.bot.addchat('clan', packet['data'][8:-1])
else:
self.bot.respond(cmd, packet['data'][8:-1])
def recv_quit(self, packet):
self.bot.addchat('clan', 'You have been removed from the clan.')
self.out_clan()
    def recv_status(self, packet):
        """Handle a member status-change packet.

        Packet layout: NUL-terminated name, rank byte, status byte, then
        the member's location string (NUL terminated).
        """
        sp = packet['data'].find('\0')
        name = packet['data'][:sp]
        rank = 4 - ord(packet['data'][sp + 1])  # wire ranks are inverted
        rank_name = ranks[rank]
        status = ord(packet['data'][sp + 2])
        loc = packet['data'][sp+3:-1]
        un = self.clan.user[name.lower()]
        if un['rank'] != rank_name:
            # Rank changed: announce the promotion/demotion.
            self.bot.addchat('clan', 'Clan member ' + name + ' (Currently ' + \
                             statuses[status] + ') is now a ' + rank_name + '.')
        else:
            # Only the presence/location changed; append the location if any.
            self.bot.addchat('clan',
                             'Clan member ' + name + ' (' + rank_name +\
                             ') is now ' + statuses[status] +\
                             (loc == '' and '.' or (' (' + loc + ').')))
        self.update_user(username=name, rank=rank,
                         status=status, location=loc)
    def update_user(self, **kw):
        """Update one member's rank/status/location, keeping the sorted
        roster and the UI list row position in sync.

        Expects kw: username, rank (int index), status, location.
        """
        old = self.clan.user[kw['username'].lower()]
        old_idx = self.clan.order.index(kw['username'].lower())
        # Drop the member's old sort record before computing the new slot.
        del self.clan.sorted[old_idx]
        kw['online'] = bool(kw['status'])
        # Keep the online counter consistent across the transition.
        if kw['online'] and not old['online']:
            self.clan.online += 1
        elif not kw['online'] and old['online']:
            self.clan.online -= 1
        # Sort key: rank, online-first (1 - status), location, then name.
        info = [kw['rank'], (1 - kw['status']), kw['location'], kw['username']]
        self.clan.sorted.append(info)
        self.clan.sorted.sort()
        new_idx = self.clan.sorted.index(info)
        kw['rank'] = ranks[kw['rank']]  # numeric rank -> display name
        if old_idx == new_idx:
            # Row stays put: update columns in place (column, value pairs).
            self.bot.events.call('ui', 'list', 'clan', 'upd_entry',
                                 [old_idx,
                                  0, str(kw['rank']),
                                  1, kw['username'],
                                  2, kw['location']],
                                 {'color': self.pick_color(kw['online'])})
        else:
            # Row moves: pass plain values plus the destination index.
            self.bot.events.call('ui', 'list', 'clan', 'upd_entry',
                                 [old_idx,
                                  str(kw['rank']),
                                  kw['username'],
                                  kw['location']],
                                 {'color': self.pick_color(kw['online']),
                                  'newidx': new_idx})
        # Re-insert the member at the new position with the updated record.
        self.clan.del_user(kw['username'])
        self.clan.add_user(kw['username'].lower(), kw, new_idx)
        self.update_header()
def del_user(self, name):
try:
idx = self.clan.order.index(name.lower())
except ValueError:
return
if self.clan.user[name]['online']:
self.clan.online -= 1
self.clan.del_user(name)
self.bot.events.call('ui', 'list', 'clan', 'remove_entry',
[idx])
self.update_header()
def recv_rank_change(self, packet):
if len(packet['data']) < 3:
return
old = ord(packet['data'][0])
new = ord(packet['data'][1])
if old == new: #why?
return
name = packet['data'][2:]
self.bot.addchat('clan',
'You were ' +\
(new > old and 'promoted' or 'demoted') +\
' to ' + ranks[4 - new] + ' (used to be ' +\
ranks[4 - old] + ') by ' + name)
def recv_set_rank(self, packet):
cookie, status = unpack('<LB', packet['data'][:5])
try:
cmd = self.responses.pop(cookie)
except KeyError:
self.bot.addchat('clan', rank_change_result(status))
else:
self.bot.respond(cmd, rank_change_result(status))
def recv_chief(self, packet):
cookie, status = unpack('<LB', packet['data'][:5])
try:
cmd = self.responses.pop(cookie)
except KeyError:
self.bot.addchat('clan', rank_change_result(status))
else:
self.bot.respond(cmd, rank_change_result(status))
def recv_removed(self, packet):
name = packet['data'][:-1]
self.bot.addchat('clan', name + ' has been removed from the clan.')
self.del_user(name)
def recv_disband(self, packet):
res = ord(packet['data'][4])
if res == 0x00:
self.bot.addchat('success', 'The clan disbanded successfully.')
elif res == 0x02:
self.bot.addchat('error', 'You cannot disband the clan because it is not yet one week old.')
elif res == 0x07:
self.bot.addchat('error', 'You are not authorized to disband the clan.')
else:
self.bot.addchat('error', 'There was an unknown error in disbanding the clan (' + str(res) + ')')
def recv_member_info(self, packet):
slen = len(packet['data']) - 15
cookie, status, clan_name, rank, joined =\
unpack('<LB' + str(slen) +\
'sxB8s', packet['data'])
rank = ranks[4 - rank]
try:
cmd = self.responses.pop(cookie)
except:
return
self.bot.respond(cmd, cmd['arg']['name'] + ' is a ' + \
rank + ' in Clan ' + clan_name + ' (' +\
cmd['arg']['tag'] + ').')
def update_header(self):
self.bot.events.call('ui', 'list', 'clan', 'header',
['Clan ' + self.clan.tag + ' (' +\
str(self.clan.online) + '/' +\
str(self.clan.count) + ')'])
def reset_list(self):
self.bot.events.call('ui', 'list', 'clan', 'clear')
for x in self.clan.order:
self.bot.events.call('ui', 'list', 'clan', 'add_entry',
[str(self.clan.user[x]['rank']),
self.clan.user[x]['username'],
self.clan.user[x]['location']],
{'color': self.pick_color(self.clan.user[x]['online'])})
self.update_header()
    def recv_clan(self, packet):
        """Parse the full clan member list and rebuild the roster.

        Per member the payload holds: NUL-terminated name, rank byte,
        status byte, then a NUL-terminated location string.
        """
        self.clear()
        num = ord(packet['data'][4])  # member count follows the cookie
        info = packet['data'][5:]
        users = []
        for x in range(num):
            sp = info.find('\0')
            name = info[:sp]
            rank = 4 - ord(info[sp+1])    # wire ranks are inverted
            status = 1 - ord(info[sp+2])  # 0 means online after inversion
            if not status:
                self.clan.online += 1
            # Advance past name+rank+status, then consume the location.
            info = info[sp+3:]
            sp = info.find('\0')
            loc = info[:sp]
            info = info[sp+1:]
            users.append([rank, status, loc, name])
        # Sort by rank, then online-first, location, name (list order).
        users.sort()
        self.clan.sorted = users
        for x in users:
            info = {'rank': ranks[x[0]],
                    'username': x[3],
                    'online': not bool(x[1]),
                    'location': x[2]}
            self.clan.add_user(x[3].lower(), info, -1)
        self.reset_list()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from thrift.publisher import *
from thrift.gen.Exception.ttypes import ThriftSessionExpiredException
from ..util.log import *
from exception import ThriftReceiverOfflineException
import time
class StreamDefinition:
    """
    Represents a BAM/CEP stream definition
    """

    # Attribute type constants understood by the receiver.
    STRING = 'STRING'
    DOUBLE = 'DOUBLE'
    INT = 'INT'
    LONG = 'LONG'
    BOOL = 'BOOL'

    def __init__(self):
        # Stream identity (str or None until set by the caller).
        self.name = None
        self.version = None
        self.nickname = None
        self.description = None
        # Attribute declarations, each a {"name": ..., "type": ...} dict.
        self.meta_data = []
        self.correlation_data = []
        self.payload_data = []
        # Assigned by the receiver once the stream has been defined.
        self.stream_id = None

    def add_metadata_attribute(self, attr_name, attr_type):
        """Declare a metadata attribute for this stream."""
        self.meta_data.append({"name": attr_name, "type": attr_type})

    def add_payloaddata_attribute(self, attr_name, attr_type):
        """Declare a payload attribute for this stream."""
        self.payload_data.append({"name": attr_name, "type": attr_type})

    def add_correlationdata_attribute(self, attr_name, attr_type):
        """Declare a correlation attribute for this stream."""
        self.correlation_data.append({"name": attr_name, "type": attr_type})

    def __str__(self):
        """Render the definition as the JSON string the receiver expects."""
        segments = [
            "\"name\":\"" + self.name + "\"",
            "\"version\":\"" + self.version + "\"",
            "\"nickName\":\"" + self.nickname + "\"",
            "\"description\":\"" + self.description + "\"",
        ]
        sections = (("metaData", self.meta_data),
                    ("correlationData", self.correlation_data),
                    ("payloadData", self.payload_data))
        # Only non-empty attribute groups are emitted.
        for key, attrs in sections:
            if len(attrs) > 0:
                items = ",".join("{\"name\":\"" + a["name"] +
                                 "\", \"type\": \"" + a["type"] + "\"}"
                                 for a in attrs)
                segments.append("\"" + key + "\":[" + items + "]")
        return "{" + ",".join(segments) + "}"
class ThriftEvent:
    """
    Represents an event to be published to a BAM/CEP monitoring server
    """

    def __init__(self):
        # Attribute payloads, bundled in this order when publishing:
        # meta, correlation, then payload.
        self.metaData = []
        self.correlationData = []
        self.payloadData = []
class ThriftPublisher:
    """
    Handles publishing events to BAM/CEP through thrift using the provided address and credentials
    """
    log = LogFactory().get_log(__name__)

    def __init__(self, ip, port, username, password, stream_definition):
        """
        Initializes a ThriftPublisher object.

        At initialization a ThriftPublisher connects and defines a stream definition. A connection
        should be disconnected after all the publishing has been done.

        :param str ip: IP address of the monitoring server
        :param str port: Port of the monitoring server
        :param str username: Username
        :param str password: Password
        :param StreamDefinition stream_definition: StreamDefinition object for this particular connection
        :return: ThriftPublisher object
        :rtype: ThriftPublisher
        """
        try:
            port_number = int(port)
        except ValueError:
            raise RuntimeError("Port number for Thrift Publisher is invalid: %r" % port)

        self.__publisher = Publisher(ip, port_number)
        self.__publisher.connect(username, password)
        self.__publisher.defineStream(str(stream_definition))

        self.stream_definition = stream_definition
        self.stream_id = self.__publisher.streamId
        self.ip = ip
        self.port = port
        # Credentials are kept so publish() can reconnect on session expiry.
        self.username = username
        self.password = password

    def publish(self, event):
        """
        Publishes the given event by creating the event bundle from the log event

        :param ThriftEvent event: The log event to be published
        :return: void
        """
        event_bundle = self.create_event_bundle(event)
        try:
            self.__publisher.publish(event_bundle)
            self.log.debug("Published event to thrift stream [%r]" % self.stream_id)
        except ThriftSessionExpiredException:
            # Session timed out: reconnect and retry the same event.
            # NOTE(review): the retry recurses; confirm a session cannot
            # expire immediately after reconnecting, or this could loop.
            self.log.debug("ThriftSession expired. Reconnecting")
            self.__publisher.connect(self.username, self.password)
            self.log.debug("connected! stream ID: %r" % self.stream_id)
            self.publish(event)
        except Exception as ex:
            # Any other failure is treated as the receiver being offline.
            raise ThriftReceiverOfflineException(ex)

    def create_event_bundle(self, event):
        """
        Creates an EventBundle object to be published to the Thrift stream

        :param ThriftEvent event:
        :return: EventBundle event bundle object
        """
        event_bundle = EventBundle()
        # Every bundle starts with the stream id and a millisecond timestamp.
        event_bundle.addStringAttribute(self.stream_id)
        event_bundle.addLongAttribute(time.time() * 1000)
        ThriftPublisher.assign_attributes(event.metaData, event_bundle)
        ThriftPublisher.assign_attributes(event.correlationData, event_bundle)
        ThriftPublisher.assign_attributes(event.payloadData, event_bundle)
        return event_bundle

    def disconnect(self):
        """
        Disconnect the thrift publisher
        :return: void
        """
        self.__publisher.disconnect()

    @staticmethod
    def assign_attributes(attributes, event_bundler):
        """
        Adds the given attributes to the given event bundler according to type of each attribute
        :param list attributes: attributes to be assigned
        :param EventBundle event_bundler: Event bundle to assign attributes to
        :return: void
        """
        if attributes is not None and len(attributes) > 0:
            for attrib in attributes:
                # bool must be checked before int: isinstance(True, int) is
                # True, so the original classified booleans as integers and
                # bundled them with addIntAttribute.
                if isinstance(attrib, bool):
                    event_bundler.addBoolAttribute(attrib)
                elif isinstance(attrib, int):
                    event_bundler.addIntAttribute(attrib)
                elif isinstance(attrib, long):
                    event_bundler.addLongAttribute(attrib)
                elif isinstance(attrib, float):
                    event_bundler.addDoubleAttribute(attrib)
                elif isinstance(attrib, str):
                    event_bundler.addStringAttribute(attrib)
                else:
                    ThriftPublisher.log.error("Undefined attribute type: %r" % attrib)
| |
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy as np
def numpy_floatX(data):
    """Return *data* as an ndarray cast to Theano's configured float dtype."""
    target_dtype = theano.config.floatX
    return np.asarray(data, dtype=target_dtype)
class Dropout_layer(object):
    """Symbolic dropout: multiplies by a binomial mask while use_noise is
    nonzero, otherwise scales activations by p.

    NOTE(review): use_noise is a locally created shared variable fixed at
    0. and never exposed, so as written only the x * p branch is active —
    confirm whether callers were meant to get a handle to toggle it.
    """
    def __init__(self, x, p=0.5):
        use_noise = theano.shared(numpy_floatX(0.))
        trng = RandomStreams(415)  # fixed seed for reproducible masks
        self.output = T.switch(use_noise,
                               (x * trng.binomial(x.shape, p=p, n=1, dtype=x.dtype)),
                               x * p
                               )
class Embedding_layer(object):
    """Word-embedding lookup over an L2-row-normalized embedding matrix.

    Input x is an (n_samples, n_steps) integer matrix of word indices;
    output is a time-major (n_steps, n_samples, word_size) tensor.
    """
    def __init__(self, x, emb, word_size=100, prefix='embedd_layer_'):
        n_steps = x.shape[1]
        n_samples = x.shape[0]
        self.x = T.transpose(x)  # work time-major internally
        # L2-normalize the embedding matrix
        emb_ = np.sqrt(np.sum(emb ** 2, axis=1))
        # NOTE(review): any all-zero row divides by zero here — confirm
        # only row 0 (zeroed just below as padding) can be all-zero.
        emb = emb / np.dot(emb_.reshape(-1, 1), np.ones((1, emb.shape[1])))
        emb[0, :] = 0.  # index 0 is the padding row; force its vector to zero
        self.emb = theano.shared(
            value=np.asarray(emb, dtype=theano.config.floatX),
            name=prefix + 'emb',
            borrow=True
        )
        # Flatten indices, look up, and reshape back to (steps, samples, dim).
        self.output = self.emb[self.x.flatten()].reshape([n_steps, n_samples, word_size])
        self.params = {prefix+'emb': self.emb}
class Embedding_layer_uniEmb(object):
    """Embedding lookup against an externally owned embedding matrix.

    Registers no parameters of its own; whoever owns *emb* trains it.
    """

    def __init__(self, x, emb, word_size=100, prefix='embedd_layer_'):
        # x is (n_samples, n_steps); work time-major internally.
        num_steps = x.shape[1]
        num_samples = x.shape[0]
        self.x = T.transpose(x)
        flat_indices = self.x.flatten()
        # (n_steps, n_samples, word_size) sequence of word vectors.
        self.output = emb[flat_indices].reshape([num_steps, num_samples, word_size])
        self.params = {}
class LogisticRegression(object):
    """Softmax (multinomial logistic regression) classifier head."""
    def __init__(self, x, y, in_size, out_size, prefix='lr_'):
        # Glorot-style uniform initialization for the weight matrix.
        self.W = theano.shared(
            value=np.random.uniform(
                low=-np.sqrt(6. / (in_size + out_size)),
                high=np.sqrt(6. / (in_size + out_size)),
                size=(in_size, out_size)
            ).astype(theano.config.floatX),
            name='W',
            borrow=True
        )
        # NOTE(review): the bias is uniform-initialized rather than zeroed,
        # and both shared variables are named without the prefix (unlike
        # the params dict keys) — confirm this is intentional.
        self.b = theano.shared(
            value=np.random.uniform(
                low=-np.sqrt(6. / (in_size + out_size)),
                high=np.sqrt(6. / (in_size + out_size)),
                size=(out_size,)
            ).astype(theano.config.floatX),
            name='b',
            borrow=True
        )
        self.y_given_x = T.nnet.softmax(T.dot(x, self.W) + self.b)  # class probabilities
        self.y_d = T.argmax(self.y_given_x, axis=1)  # hard predictions
        # Mean negative log-likelihood of the true labels y.
        self.loss = -T.mean(T.log(self.y_given_x)[T.arange(y.shape[0]), y])
        self.error = T.mean(T.neq(self.y_d, y))  # misclassification rate
        self.params = {prefix+'W': self.W, prefix+'b': self.b}
class LSTM_layer(object):
    """Masked LSTM over a time-major input sequence."""
    def __init__(self, x, mask=None, in_size=100, hidden_size=400, mean_pooling=False, prefix='lstm_'):
        """attention, every column in input is a sample"""
        def random_weights(x_dim, y_dim):
            # Glorot-style uniform initialization.
            return np.random.uniform(
                low=-np.sqrt(6. / (x_dim + y_dim)),
                high=np.sqrt(6. / (x_dim + y_dim)),
                size=(x_dim, y_dim)
            ).astype(theano.config.floatX)
        # Input-to-hidden weights for the four gate/candidate transforms
        # (i, f, o, c), concatenated along axis 1.
        self.W = theano.shared(
            value=np.concatenate(
                [random_weights(in_size, hidden_size),
                 random_weights(in_size, hidden_size),
                 random_weights(in_size, hidden_size),
                 random_weights(in_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'W',
            borrow=True
        )
        # Recurrent (hidden-to-hidden) weights, same four-slab layout.
        self.U = theano.shared(
            value=np.concatenate(
                [random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'U',
            borrow=True
        )
        self.b = theano.shared(
            value=np.zeros((4 * hidden_size,)).astype(theano.config.floatX),
            name=prefix+'b',
            borrow=True
        )
        assert mask is not None
        n_steps = x.shape[0]
        if x.ndim == 3:
            n_samples = x.shape[1]
        else:
            n_samples = 1
        def _slice(_x, n, dim):
            # Pick the n-th slab out of the concatenated gate matrix.
            if _x.ndim == 3:
                return _x[:, :, n * dim:(n + 1) * dim]
            return _x[:, n * dim:(n + 1) * dim]
        def _step(m_, x_, h_, c_):
            # One LSTM step; m_ masks out padded positions.
            preact = T.dot(h_, self.U)
            preact += x_
            i = T.nnet.sigmoid(_slice(preact, 0, hidden_size))  # input gate
            f = T.nnet.sigmoid(_slice(preact, 1, hidden_size))  # forget gate
            o = T.nnet.sigmoid(_slice(preact, 2, hidden_size))  # output gate
            c = T.tanh(_slice(preact, 3, hidden_size))          # candidate cell
            c = f * c_ + i * c
            # Carry the previous state through where the mask is 0 (padding).
            c = m_[:, None] * c + (1. - m_)[:, None] * c_
            h = o * T.tanh(c)
            h = m_[:, None] * h + (1. - m_)[:, None] * h_
            return h, c
        # Precompute W x_t + b for every step in one big dot product.
        input = (T.dot(x, self.W) + self.b)
        rval, updates = theano.scan(_step,
                                    sequences=[mask, input],
                                    outputs_info=[T.alloc(numpy_floatX(0.), n_samples, hidden_size),
                                                  T.alloc(numpy_floatX(0.), n_samples, hidden_size)],
                                    name=prefix+'_scan',
                                    n_steps=n_steps)
        if mean_pooling:
            # Mask-weighted mean of hidden states over time.
            hidden_sum = (rval[0] * mask[:, :, None]).sum(axis=0)
            self.output = hidden_sum / mask.sum(axis=0)[:, None]
        else:
            # Final hidden state only.
            self.output = rval[0][-1, :, :]
        self.out_all = rval[0]
        self.params = {prefix+'W' : self.W, prefix+'U': self.U, prefix+'b': self.b}
class GRU_layer(object):
    """Masked GRU over a time-major input sequence."""
    def __init__(self, x, mask=None, in_size=100, hidden_size=400, mean_pooling=False, prefix='gru_'):
        """attention, every column in input is a sample"""
        def random_weights(x_dim, y_dim):
            # Glorot-style uniform initialization.
            return np.random.uniform(
                low=-np.sqrt(6. / (x_dim + y_dim)),
                high=np.sqrt(6. / (x_dim + y_dim)),
                size=(x_dim, y_dim)
            ).astype(theano.config.floatX)
        # Input-to-hidden weights for the z (update), r (reset) and
        # candidate transforms, concatenated along axis 1.
        self.W = theano.shared(
            value=np.concatenate(
                [random_weights(in_size, hidden_size),
                 random_weights(in_size, hidden_size),
                 random_weights(in_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'W',
            borrow=True
        )
        # Recurrent (hidden-to-hidden) weights, same three-slab layout.
        self.U = theano.shared(
            value=np.concatenate(
                [random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'U',
            borrow=True
        )
        self.b = theano.shared(
            value=np.zeros((3 * hidden_size,)).astype(theano.config.floatX),
            name=prefix+'b',
            borrow=True
        )
        assert mask is not None
        n_steps = x.shape[0]
        if x.ndim == 3:
            n_samples = x.shape[1]
        else:
            n_samples = 1
        def _slice(_x, n, dim):
            # Pick the n-th slab out of the concatenated gate matrix.
            if _x.ndim == 3:
                return _x[:, :, n * dim:(n + 1) * dim]
            return _x[:, n * dim:(n + 1) * dim]
        def _step(m_, x_, h_):
            # One GRU step; m_ masks out padded positions.
            preact = T.dot(h_, self.U)
            preact += x_
            z = T.nnet.sigmoid(_slice(preact, 0, hidden_size))  # update gate
            r = T.nnet.sigmoid(_slice(preact, 1, hidden_size))  # reset gate
            # Candidate state; note the nonstandard use of r to interpolate
            # between the recurrent preactivation and the raw input slab.
            c = T.tanh(_slice(preact, 2, hidden_size) * r + (T.ones_like(r) - r) * _slice(x_, 2, hidden_size))
            h = (T.ones_like(z) - z) * c + z * h_
            h = m_[:, None] * h + (1. - m_)[:, None] * h_
            return h
        # Precompute W x_t + b for every step in one big dot product.
        input = (T.dot(x, self.W) + self.b)
        rval, updates = theano.scan(_step,
                                    sequences=[mask, input],
                                    outputs_info=[T.alloc(numpy_floatX(0.), n_samples, hidden_size)],
                                    name=prefix+'_scan',
                                    n_steps=n_steps)
        if mean_pooling:
            # Mask-weighted mean of hidden states over time.
            hidden_sum = (rval * mask[:, :, None]).sum(axis=0)
            self.output = hidden_sum / mask.sum(axis=0)[:, None]
        else:
            self.output = rval[-1, :, :]  # final hidden state only
        self.out_all = rval
        self.params = {prefix+'W': self.W, prefix+'U': self.U, prefix+'b': self.b}
class GRU_layer_uniParam(object):
    """GRU layer that reuses externally owned W/U/b shared variables.

    Registers no params of its own; the layer that owns W/U/b trains them.
    The recurrence is identical to GRU_layer.
    """
    def __init__(self, x, W, U, b, mask=None, in_size=100, hidden_size=400, mean_pooling=False, prefix='uni_gru_'):
        """attention, every column in input is a sample"""
        self.W = W
        self.U = U
        self.b = b
        assert mask is not None
        n_steps = x.shape[0]
        if x.ndim == 3:
            n_samples = x.shape[1]
        else:
            n_samples = 1
        def _slice(_x, n, dim):
            # Pick the n-th slab out of the concatenated gate matrix.
            if _x.ndim == 3:
                return _x[:, :, n * dim:(n + 1) * dim]
            return _x[:, n * dim:(n + 1) * dim]
        def _step(m_, x_, h_):
            # One GRU step; m_ masks out padded positions.
            preact = T.dot(h_, self.U)
            preact += x_
            z = T.nnet.sigmoid(_slice(preact, 0, hidden_size))  # update gate
            r = T.nnet.sigmoid(_slice(preact, 1, hidden_size))  # reset gate
            c = T.tanh(_slice(preact, 2, hidden_size) * r + (T.ones_like(r) - r) * _slice(x_, 2, hidden_size))
            h = (T.ones_like(z) - z) * c + z * h_
            h = m_[:, None] * h + (1. - m_)[:, None] * h_
            return h
        # Precompute W x_t + b for every step in one big dot product.
        input = (T.dot(x, self.W) + self.b)
        rval, updates = theano.scan(_step,
                                    sequences=[mask, input],
                                    outputs_info=[T.alloc(numpy_floatX(0.), n_samples, hidden_size)],
                                    name=prefix+'_scan',
                                    n_steps=n_steps)
        if mean_pooling:
            # Mask-weighted mean of hidden states over time.
            hidden_sum = (rval * mask[:, :, None]).sum(axis=0)
            self.output = hidden_sum / mask.sum(axis=0)[:, None]
        else:
            self.output = rval[-1, :, :]  # final hidden state only
        self.out_all = rval
class DMN_GRU(object):
    """Dynamic-memory-network style GRU.

    A scalar attention gate g, computed per step from the input features
    and a query vector l, interpolates each GRU update with the previous
    hidden state.
    """
    def __init__(self, x, l, mask=None, hidden_size=400, mean_pooling=False, prefix='dmn_gru_'):
        """attention, every column in input is a sample"""
        def random_weights(x_dim, y_dim):
            # Glorot-style uniform initialization.
            return np.random.uniform(
                low=-np.sqrt(6. / (x_dim + y_dim)),
                high=np.sqrt(6. / (x_dim + y_dim)),
                size=(x_dim, y_dim)
            ).astype(theano.config.floatX)
        # GRU input weights (z, r, candidate), concatenated along axis 1.
        self.W = theano.shared(
            value=np.concatenate(
                [random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'W',
            borrow=True
        )
        # NOTE(review): W0 is initialized but never used in the graph and
        # is excluded from params below — it appears to be dead weight.
        self.W0 = theano.shared(
            value=random_weights(hidden_size, hidden_size),
            name=prefix+'W0',
            borrow=True
        )
        # Attention MLP: 4*hidden feature vector -> hidden (W1,b1) -> scalar (W2,b2).
        self.W1 = theano.shared(
            value=random_weights(4 * hidden_size, hidden_size),
            name=prefix + 'W1',
            borrow=True
        )
        self.W2 = theano.shared(
            value=np.random.uniform(
                low=-np.sqrt(6. / hidden_size),
                high=np.sqrt(6. / hidden_size),
                size=(hidden_size, )
            ).astype(theano.config.floatX),
            name=prefix + 'W2',
            borrow=True
        )
        # Recurrent GRU weights, same three-slab layout as W.
        self.U = theano.shared(
            value=np.concatenate(
                [random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'U',
            borrow=True
        )
        self.b = theano.shared(
            value=np.zeros((3 * hidden_size,)).astype(theano.config.floatX),
            name=prefix+'b',
            borrow=True
        )
        self.b1 = theano.shared(
            value=np.zeros(
                shape=(hidden_size,),
                dtype=theano.config.floatX
            ),
            name=prefix + 'b1',
            borrow=True
        )
        self.b2 = theano.shared(
            value=numpy_floatX(0.),
            name=prefix + 'b2',
            borrow=True
        )
        assert mask is not None
        n_steps = x.shape[0]
        if x.ndim == 3:
            n_samples = x.shape[1]
        else:
            n_samples = 1
        def _slice(_x, n, dim):
            # Pick the n-th slab out of the concatenated gate matrix.
            if _x.ndim == 3:
                return _x[:, :, n * dim:(n + 1) * dim]
            return _x[:, n * dim:(n + 1) * dim]
        def _step(m_, x_, h_):
            # One gated step; m_ masks out padded positions.
            preact = T.dot(h_, self.U)
            preact += x_
            _x = _slice(x_, 2, hidden_size)  # candidate-slab input features
            # Attention features: [x, l, x*l, |x-l|], as in the DMN paper.
            _z = T.concatenate([_x, l, _x * l, T.abs_(_x - l)], axis=1)
            g = T.nnet.sigmoid(T.dot(T.tanh(T.dot(_z, self.W1) + self.b1), self.W2) + self.b2)
            z = T.nnet.sigmoid(_slice(preact, 0, hidden_size))  # update gate
            r = T.nnet.sigmoid(_slice(preact, 1, hidden_size))  # reset gate
            c = T.tanh(_slice(preact, 2, hidden_size) * r + (T.ones_like(r) - r) * _slice(x_, 2, hidden_size))
            _h = (T.ones_like(z) - z) * c + z * h_
            # Attention gate interpolates the GRU update with the old state.
            h = T.batched_dot(g, _h) + T.batched_dot(T.ones_like(g) - g, h_)
            h = m_[:, None] * h + (1. - m_)[:, None] * h_
            return h
        # Precompute W x_t + b for every step in one big dot product.
        input = (T.dot(x, self.W) + self.b)
        rval, updates = theano.scan(_step,
                                    sequences=[mask, input],
                                    outputs_info=[T.alloc(numpy_floatX(0.), n_samples, hidden_size)],
                                    name=prefix+'_scan',
                                    n_steps=n_steps)
        if mean_pooling:
            # Mask-weighted mean of hidden states over time.
            hidden_sum = (rval * mask[:, :, None]).sum(axis=0)
            self.output = hidden_sum / mask.sum(axis=0)[:, None]
        else:
            self.output = rval[-1, :, :]  # final hidden state only
        self.out_all = rval
        self.params = {prefix + 'W': self.W,
                       prefix + 'U': self.U,
                       prefix + 'b': self.b,
                       # prefix + 'W0': self.W0,
                       prefix + 'W1': self.W1,
                       prefix + 'W2': self.W2,
                       prefix + 'b1': self.b1,
                       prefix + 'b2': self.b2}
class KBMN_GRU(object):
    """Knowledge-base memory-network GRU.

    Like DMN_GRU, but each step also attends over a fixed knowledge-base
    matrix *kbm* (via V) and feeds the retrieved vector into the gate MLP.
    """
    def __init__(self, x, l, kbm, mask=None, hidden_size=400, mean_pooling=False, prefix='kbmn_gru_'):
        """attention, every column in input is a sample"""
        self._init_params(kbm, hidden_size, prefix=prefix)
        assert mask is not None
        n_steps = x.shape[0]
        if x.ndim == 3:
            n_samples = x.shape[1]
        else:
            n_samples = 1
        def _slice(_x, n, dim):
            # Pick the n-th slab out of the concatenated gate matrix.
            if _x.ndim == 3:
                return _x[:, :, n * dim:(n + 1) * dim]
            return _x[:, n * dim:(n + 1) * dim]
        def _step(m_, x_, h_):
            # One gated step; m_ masks out padded positions.
            preact = T.dot(h_, self.U)
            preact += x_
            _x = _slice(x_, 2, hidden_size)  # candidate-slab input features
            # Soft knowledge-base read: softmax attention over kbm rows.
            _k = T.dot(T.nnet.softmax(T.dot(_x, self.V)), kbm)
            # Attention features: [x, k, l, x*l, k*l, |x-l|, |k-l|].
            _z = T.concatenate([_x, _k, l, _x * l, _k * l, T.abs_(_x - l), T.abs_(_k - l)], axis=1)
            g = T.nnet.sigmoid(T.dot(T.tanh(T.dot(_z, self.W1) + self.b1), self.W2) + self.b2)
            z = T.nnet.sigmoid(_slice(preact, 0, hidden_size))  # update gate
            r = T.nnet.sigmoid(_slice(preact, 1, hidden_size))  # reset gate
            c = T.tanh(_slice(preact, 2, hidden_size) * r + (T.ones_like(r) - r) * _slice(x_, 2, hidden_size))
            _h = (T.ones_like(z) - z) * c + z * h_
            # Attention gate interpolates the GRU update with the old state.
            h = T.batched_dot(g, _h) + T.batched_dot(T.ones_like(g) - g, h_)
            h = m_[:, None] * h + (1. - m_)[:, None] * h_
            return h
        # Precompute W x_t + b for every step in one big dot product.
        input = (T.dot(x, self.W) + self.b)
        rval, updates = theano.scan(_step,
                                    sequences=[mask, input],
                                    outputs_info=[T.alloc(numpy_floatX(0.), n_samples, hidden_size)],
                                    name=prefix+'_scan',
                                    n_steps=n_steps)
        if mean_pooling:
            # Mask-weighted mean of hidden states over time.
            hidden_sum = (rval * mask[:, :, None]).sum(axis=0)
            self.output = hidden_sum / mask.sum(axis=0)[:, None]
        else:
            self.output = rval[-1, :, :]  # final hidden state only
        self.out_all = rval
    def _init_params(self, kbm, hidden_size, prefix):
        """Create all shared parameters for this layer."""
        def random_weights(x_dim, y_dim):
            # Glorot-style uniform initialization.
            return np.random.uniform(
                low=-np.sqrt(6. / (x_dim + y_dim)),
                high=np.sqrt(6. / (x_dim + y_dim)),
                size=(x_dim, y_dim)
            ).astype(theano.config.floatX)
        # NOTE(review): the knowledge-base size is hard-coded rather than
        # derived from kbm — confirm kbm always has 198 rows.
        n_kbitems = 198
        # Projects step features to attention logits over the KB items.
        self.V = theano.shared(
            value=random_weights(hidden_size, n_kbitems),
            name=prefix+'V',
            borrow=True
        )
        # GRU input weights (z, r, candidate), concatenated along axis 1.
        self.W = theano.shared(
            value=np.concatenate(
                [random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix + 'W',
            borrow=True
        )
        # NOTE(review): W0 is initialized but never used in the graph and
        # is excluded from params below — it appears to be dead weight.
        self.W0 = theano.shared(
            value=random_weights(hidden_size, hidden_size),
            name=prefix + 'W0',
            borrow=True
        )
        # Attention MLP: 7*hidden feature vector -> hidden (W1,b1) -> scalar (W2,b2).
        self.W1 = theano.shared(
            value=random_weights(7 * hidden_size, hidden_size),
            name=prefix + 'W1',
            borrow=True
        )
        self.W2 = theano.shared(
            value=np.random.uniform(
                low=-np.sqrt(6. / hidden_size),
                high=np.sqrt(6. / hidden_size),
                size=(hidden_size,)
            ).astype(theano.config.floatX),
            name=prefix + 'W2',
            borrow=True
        )
        # Recurrent GRU weights, same three-slab layout as W.
        self.U = theano.shared(
            value=np.concatenate(
                [random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size),
                 random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix + 'U',
            borrow=True
        )
        self.b = theano.shared(
            value=np.zeros((3 * hidden_size,)).astype(theano.config.floatX),
            name=prefix + 'b',
            borrow=True
        )
        self.b1 = theano.shared(
            value=np.zeros(
                shape=(hidden_size,),
                dtype=theano.config.floatX
            ),
            name=prefix + 'b1',
            borrow=True
        )
        self.b2 = theano.shared(
            value=numpy_floatX(0.),
            name=prefix + 'b2',
            borrow=True
        )
        self.params = {prefix + 'W': self.W,
                       prefix + 'U': self.U,
                       prefix + 'b': self.b,
                       # prefix + 'W0': self.W0,
                       prefix + 'W1': self.W1,
                       prefix + 'W2': self.W2,
                       prefix + 'b1': self.b1,
                       prefix + 'b2': self.b2,
                       prefix + 'V': self.V}
| |
"""
SQLAlchemy-JSONAPI Serializer.
Colton J. Provias - cj@coltonprovias.com
http://github.com/coltonprovias/sqlalchemy-jsonapi
Licensed with MIT License
"""
from functools import wraps
from sqlalchemy.orm.base import MANYTOONE, ONETOMANY
def as_relationship(to_many=False, linked_key=None, link_key=None,
                    columns=None):
    """
    Turn a method into a pseudo-relationship for serialization.

    Arguments:

    - to_many: Whether the relationship is to-many or to-one.
    - linked_key: The key used in the linked section of the serialized data
    - link_key: The key used in the link section in the model's serialization
    - columns: Columns tied to this relationship
    """
    def wrapper(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        # Direction tells the serializer whether to emit a collection or a
        # single resource.
        if to_many:
            wrapped.direction = ONETOMANY
        else:
            wrapped.direction = MANYTOONE
        wrapped.key = link_key or wrapped.__name__
        wrapped.linked_key = linked_key or wrapped.key
        # The original default (columns=[]) was a shared mutable list: every
        # decorated method without explicit columns received the SAME list
        # object, so one mutation leaked into all of them. Use a None
        # sentinel and create a fresh list per decorated method instead.
        wrapped.local_columns = columns if columns is not None else []
        return wrapped
    return wrapper
class JSONAPIMixin:
    """ Mixin that enables serialization of a model. """

    # Columns to be excluded from serialization
    jsonapi_columns_exclude = []
    # Extra columns to be included with serialization
    jsonapi_columns_include = []
    # Hook for overriding column data
    jsonapi_columns_override = {}
    # Relationships to be excluded from serialization
    jsonapi_relationships_exclude = []
    # Extra relationships to be included with serialization
    jsonapi_relationships_include = []
    # Hook for overriding relationships
    jsonapi_relationships_override = {}

    def id(self):
        """ JSON API recommends having an id for each resource. """
        # The original raised the NotImplemented singleton, which is not an
        # exception class; NotImplementedError is the correct way to mark an
        # abstract method.
        raise NotImplementedError

    def jsonapi_can_view(self):
        """ Return True if this model can be serialized. """
        return True
class SkipType(object):
    """Sentinel class marking values that serialization should skip."""
class JSONAPI:
    """ The main JSONAPI serializer class. """
    # A dictionary of converters for serialization, keyed by type name
    # (e.g. 'datetime'); each value maps an object to a JSON-friendly value.
    converters = {}

    def __init__(self, model):
        """
        Create a serializer object.
        Arguments:
        - model: Should be a SQLAlchemy model class.
        """
        self.model = model

    def inflector(self, to_inflect):
        """
        Format text for use in keys in serialization.
        Override this if you need to meet requirements on your front-end.
        Arguments:
        - to_inflect: The string to be inflected
        Returns the altered string.
        """
        return to_inflect

    def convert(self, item, to_convert):
        """
        Convert from Python objects to JSON-friendly values.
        Arguments:
        - item: A SQLAlchemy model instance
        - to_convert: Python object to be converted
        Returns None, a string/int/float/bool, a converted value, or
        SkipType when no conversion is registered.
        """
        if to_convert is None:
            return None
        if isinstance(to_convert, (str, int, float, bool)):
            return to_convert
        if callable(to_convert):
            return to_convert(item)
        # BUGFIX: use .get() so an unregistered type falls through to
        # SkipType; the old `self.converters[...] is not None` check
        # raised KeyError for any type without a converter.
        converter = self.converters.get(type(to_convert).__name__)
        if converter is not None:
            return converter(to_convert)
        return SkipType

    def get_api_key(self, model):
        """
        Generate a key for a model.
        Arguments:
        - model: SQLAlchemy model instance
        Returns an inflected key that is generated from jsonapi_key or from
        __tablename__.
        """
        api_key = getattr(model, 'jsonapi_key', model.__tablename__)
        return self.inflector(api_key)

    def sort_query(self, model, query, sorts):
        """
        Sort a query based upon provided sorts.
        Arguments:
        - model: SQLAlchemy model class
        - query: Instance of Query or AppenderQuery
        - sorts: A dictionary of sorts keyed by the api_key for each model
        Returns a query with appropriate order_by appended.
        """
        if sorts is None:
            return query
        api_key = self.get_api_key(model)
        # BUGFIX: tolerate a sorts dict that has no entry for this model
        # instead of raising KeyError.
        for sort in sorts.get(api_key, []):
            if sort.startswith('-'):
                # A leading '-' requests a descending sort on the column.
                sort_by = getattr(model, sort[1:]).desc()
            else:
                sort_by = getattr(model, sort)
            query = query.order_by(sort_by)
        return query

    def parse_include(self, include):
        """
        Parse the include query parameter.
        Arguments:
        - include: A list of resources to be included by link_keys
        Returns a dictionary of the parsed include list. A None value
        signifies that the resource itself should be dumped.
        """
        ret = {}
        for item in include:
            if '.' in item:
                # 'a.b.c' -> local 'a', remote 'b.c' (the remote part is
                # parsed again when the related resource is dumped).
                local, remote = item.split('.', maxsplit=1)
            else:
                local = item
                remote = None
            if local not in ret.keys():
                ret[local] = []
            ret[local].append(remote)
        return ret

    def dump_column_data(self, item, fields):
        """
        Dump the data from the colums of a model instance.
        Arguments:
        - item: An SQLAlchemy model instance
        - fields: A list of requested fields. If it is None, all available
        fields will be returned.
        Returns a dictionary representing the instance's data.
        """
        obj = dict()
        columns = list(item.__table__.columns)
        column_data = dict()
        api_key = self.get_api_key(item)
        for column in columns:
            if column.name in item.jsonapi_columns_exclude:
                continue
            column_data[column.name] = getattr(item, column.name)
        for column in item.jsonapi_columns_include:
            column_data[column] = getattr(item, column)
        column_data.update(item.jsonapi_columns_override)
        for name, value in column_data.items():
            key = self.inflector(name)
            # 'id' is always dumped; other keys honor any requested fields.
            if key != 'id' and fields is not None and \
                    api_key in fields.keys() and \
                    key not in fields[api_key]:
                continue
            converted = self.convert(item, value)
            # SkipType is a sentinel class; compare by identity.
            if converted is not SkipType:
                obj[key] = converted
        return obj

    def dump_relationship_data(self, item, obj, depth, fields, sort, include):
        """
        Handle relationship dumping for a model.
        Arguments:
        - item: SQLAlchemy model instance
        - obj: Column data for the model post-dump
        - depth: How much deeper into the relationships do we have to go
        captain?
        - fields: A dictionary of fields to be parsed based on linked_keys.
        - sort: A dictionary of fields to sort by
        - include: A list of resources to be included by link_keys.
        Returns (obj, linked) where linked maps linked_key -> {id: dump}.
        """
        relationships = dict(list(map((lambda x: (x.key, x)),
                                      item.__mapper__.relationships)))
        for key in item.jsonapi_relationships_exclude:
            if key not in relationships.keys():
                continue
            del relationships[key]
        for key in item.jsonapi_relationships_include:
            relationships[key] = getattr(item, key)
        # BUGFIX: iterate the override mapping's items(); iterating the
        # dict directly yields bare keys and the 2-tuple unpack failed.
        for key, value in item.jsonapi_relationships_override.items():
            relationships[key] = getattr(item, value)
        if include is not None:
            include = self.parse_include(include)
        obj['links'] = {}
        linked = {}
        for key, relationship in relationships.items():
            dump_this = True
            link_key = self.inflector(key)
            if hasattr(relationship, 'mapper'):
                # Real SQLAlchemy relationship: derive the linked key from
                # the related mapper's model.
                mapper = relationship.mapper.class_
                linked_key = self.inflector(getattr(mapper, 'jsonapi_key',
                                                    mapper.__tablename__))
            else:
                # Pseudo-relationship created with @as_relationship.
                linked_key = self.inflector(relationship.linked_key)
            if relationship.direction == MANYTOONE:
                # Move the FK column's value out of the attribute section
                # and into the links section.
                for column in relationship.local_columns:
                    if isinstance(column, str):
                        col_name = self.inflector(column)
                    else:
                        col_name = self.inflector(column.name)
                    if col_name in obj.keys():
                        obj['links'][link_key] = self.convert(item,
                                                              obj[col_name])
                        del obj[col_name]
            if include is not None:
                if link_key not in include.keys():
                    continue
                local_include = include[link_key]
                if None in include[link_key]:
                    # None means "side-load this resource itself" in
                    # addition to any deeper includes.
                    local_include.remove(None)
                else:
                    dump_this = False
            else:
                local_include = None
            if depth > 0 or (include is not None and
                             local_include is not None):
                if callable(relationship):
                    related = relationship()
                else:
                    related = getattr(item, relationship.key)
                if relationship.direction == MANYTOONE:
                    if isinstance(related, JSONAPIMixin):
                        if not related.jsonapi_can_view():
                            continue
                        if dump_this and linked_key not in linked.keys():
                            linked[linked_key] = {}
                        r_obj, r_lnk = self.dump_object(related, depth - 1,
                                                        fields, sort,
                                                        local_include)
                        linked.update(r_lnk)
                        if dump_this:
                            linked[linked_key][str(r_obj['id'])] = r_obj
                else:
                    if sort is not None and linked_key in sort.keys():
                        related = self.sort_query(mapper, related, sort)
                    if link_key not in obj['links'].keys():
                        obj['links'][link_key] = []
                    for local_item in list(related):
                        if not isinstance(local_item, JSONAPIMixin):
                            continue
                        if not local_item.jsonapi_can_view():
                            continue
                        if dump_this and linked_key not in linked.keys():
                            linked[linked_key] = {}
                        obj['links'][link_key].append(str(local_item.id))
                        r_obj, r_lnk = self.dump_object(local_item, depth - 1,
                                                        fields, sort,
                                                        local_include)
                        linked.update(r_lnk)
                        if dump_this:
                            linked[linked_key][str(r_obj['id'])] = r_obj
        return obj, linked

    def dump_object(self, item, depth, fields, sort, include):
        """
        Quick, simple way of coordinating a dump.
        Arguments:
        - item: Instance of a SQLAlchemy model
        - depth: Integer of how deep relationships should be queried
        - fields: Dictionary of fields to be returned, keyed by linked_keys
        - sort: Dictionary of fields to sory by, keyed by linked_keys
        - include: List of resources to side-load by link_keys.
        """
        obj = self.dump_column_data(item, fields)
        return self.dump_relationship_data(item, obj, depth, fields, sort,
                                           include)

    def serialize(self, to_serialize, depth=1, fields=None, sort=None,
                  include=None):
        """
        Perform the serialization to dictionary in JSON API format.
        Arguments:
        - to_serialize: The query, collection, or instance to serialize.
        - depth: How deep to side-load relationships. If include is provided,
        this will be overridden
        - fields: Dictionary of fields to be returned keyed by linked_keys or
        a list of fields for the current instance
        - sort: Dictionary of fields to sort by keyed by linked_keys or a list
        of fields to sort by for the current instance
        - include: List of resources to side-load by link_keys.
        """
        api_key = self.get_api_key(self.model)
        to_return = {api_key: [], 'linked': {}, 'meta': {}}
        linked = dict()
        if isinstance(to_serialize, JSONAPIMixin):
            # A single instance: wrap it so the loop below still works,
            # and unwrap the result at the end.
            is_single = True
            to_serialize = [to_serialize]
        else:
            is_single = False
        # Allow plain lists for fields/sort as shorthand for "this model".
        if isinstance(fields, list):
            fields = {api_key: fields}
        if isinstance(sort, list):
            sort = {api_key: sort}
        if not is_single:
            to_serialize = self.sort_query(self.model, to_serialize, sort)
        for item in to_serialize:
            if not item.jsonapi_can_view():
                continue
            dumped = self.dump_object(item, depth, fields, sort, include)
            if dumped is None:
                continue
            obj, new_linked = dumped
            to_return[api_key].append(obj)
            for key in new_linked.keys():
                if key not in linked.keys():
                    linked[key] = dict()
                linked[key].update(new_linked[key])
        for key in linked.keys():
            to_return['linked'][key] = list(linked[key].values())
        if is_single:
            # NOTE: raises IndexError if the single instance was not
            # viewable; callers rely on a bare object here, not a list.
            to_return[api_key] = to_return[api_key][0]
        return to_return
| |
from __future__ import unicode_literals
from operator import attrgetter
from django import VERSION
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db import models, router
from django.db.models.fields import Field
from django.db.models.fields.related import (add_lazy_relation, ManyToManyRel,
OneToOneRel, RelatedField)
if VERSION < (1, 8):
# related.py was removed in Django 1.8
# Depending on how Django was updated, related.py could still exist
# on the users system even on Django 1.8+, so we check the Django
# version before importing it to make sure this doesn't get imported
# accidentally.
from django.db.models.related import RelatedObject
else:
RelatedObject = None
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import GenericTaggedItemBase, TaggedItem
from taggit.utils import _get_field, require_instance_manager
try:
from django.contrib.contenttypes.fields import GenericRelation
except ImportError: # django < 1.7
from django.contrib.contenttypes.generic import GenericRelation
try:
from django.db.models.query_utils import PathInfo
except ImportError: # Django < 1.8
try:
from django.db.models.related import PathInfo
except ImportError:
pass # PathInfo is not used on Django < 1.6
def _model_name(model):
    # Django 1.7 renamed Options.module_name to model_name.
    opts = model._meta
    return opts.module_name if VERSION < (1, 7) else opts.model_name
class TaggableRel(ManyToManyRel):
    """Relation descriptor backing TaggableManager.

    Sets ManyToManyRel's attribute contract by hand rather than calling
    its __init__, since the tag relation is synthesized.
    """
    def __init__(self, field, related_name, through, to=None):
        self.field = field
        self.to = to
        self.related_name = related_name
        self.limit_choices_to = {}
        self.symmetrical = True
        self.multiple = True
        # Django < 1.7 did not carry `through` on the rel object.
        self.through = through if VERSION >= (1, 7) else None
        self.through_fields = None

    def get_joining_columns(self):
        # Delegate to the field's reverse join definition.
        return self.field.get_reverse_joining_columns()

    def get_extra_restriction(self, where_class, alias, related_alias):
        # Aliases are swapped on purpose: the restriction is applied from
        # the related side when the join is traversed in reverse.
        return self.field.get_extra_restriction(where_class, related_alias, alias)
class ExtraJoinRestriction(object):
    """
    An extra restriction used for contenttype restriction in joins.
    """
    def __init__(self, alias, col, content_types):
        self.alias = alias
        self.col = col
        self.content_types = content_types

    def as_sql(self, qn, connection):
        # Render "alias.col = %s" for a single content type, otherwise an
        # IN clause with one placeholder per content type.
        lhs = "%s.%s" % (qn(self.alias), qn(self.col))
        if len(self.content_types) == 1:
            where = "%s = %%s" % lhs
        else:
            placeholders = ','.join(['%s'] * len(self.content_types))
            where = "%s IN (%s)" % (lhs, placeholders)
        return where, self.content_types

    def relabel_aliases(self, change_map):
        # Follow the query compiler's alias renames.
        self.alias = change_map.get(self.alias, self.alias)

    def clone(self):
        # Copy the content-types list so the clone is independent.
        return self.__class__(self.alias, self.col, self.content_types[:])
class _TaggableManager(models.Manager):
    """Per-instance manager exposing tag operations for a tagged model.

    In this fork tags are scoped by a (user, group) pair, so most
    operations take ``user`` and ``group`` arguments and filter on them.
    """
    def __init__(self, through, model, instance, prefetch_cache_name):
        # through: the intermediary (tagged-item) model
        # model: the tagged model class
        # instance: the tagged model instance being managed
        # prefetch_cache_name: cache key used by prefetch_related
        self.through = through
        self.model = model
        self.instance = instance
        self.prefetch_cache_name = prefetch_cache_name
        self._db = None
    def is_cached(self, instance):
        # True when prefetch_related has already cached this relation.
        return self.prefetch_cache_name in instance._prefetched_objects_cache
    def get_queryset(self, extra_filters=None):
        # Prefer the prefetch cache; fall back to a fresh query via the
        # through model's tags_for().
        try:
            return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
        except (AttributeError, KeyError):
            kwargs = extra_filters if extra_filters else {}
            return self.through.tags_for(self.model, self.instance, **kwargs)
    def get_prefetch_queryset(self, instances, queryset=None):
        """Build the queryset prefetch_related uses for *instances*."""
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")
        instance = instances[0]
        from django.db import connections
        db = self._db or router.db_for_read(instance.__class__, instance=instance)
        # Generic (contenttypes) through models key on object_id; direct
        # FK through models key on content_object.
        fieldname = ('object_id' if issubclass(self.through, GenericTaggedItemBase)
                     else 'content_object')
        fk = self.through._meta.get_field(fieldname)
        query = {
            '%s__%s__in' % (self.through.tag_relname(), fk.name):
                set(obj._get_pk_val() for obj in instances)
        }
        join_table = self.through._meta.db_table
        source_col = fk.column
        connection = connections[db]
        qn = connection.ops.quote_name
        # Select the joining column so Django can map fetched tags back
        # to the originating instances.
        qs = self.get_queryset(query).using(db).extra(
            select={
                '_prefetch_related_val': '%s.%s' % (qn(join_table), qn(source_col))
            }
        )
        return (qs,
                attrgetter('_prefetch_related_val'),
                lambda obj: obj._get_pk_val(),
                False,
                self.prefetch_cache_name)
    # Django < 1.6 uses the previous name of query_set
    get_query_set = get_queryset
    get_prefetch_query_set = get_prefetch_queryset
    def _lookup_kwargs(self):
        # Filter kwargs that identify this instance in the through table.
        return self.through.lookup_kwargs(self.instance)
    @require_instance_manager
    def add(self, user, group=None, *tags):
        """Attach *tags* (tag objects or name strings) for (user, group)."""
        str_tags = set()
        tag_objs = set()
        for t in tags:
            if isinstance(t, self.through.tag_model()):
                tag_objs.add(t)
            elif isinstance(t, six.string_types):
                str_tags.add(t)
            else:
                raise ValueError("Cannot add {0} ({1}). Expected {2} or str.".format(
                    t, type(t), type(self.through.tag_model())))
        if getattr(settings, 'TAGGIT_CASE_INSENSITIVE', False):
            # Some databases can do case-insensitive comparison with IN, which
            # would be faster, but we can't rely on it or easily detect it.
            existing = []
            tags_to_create = []
            for name in str_tags:
                try:
                    tag = self.through.tag_model().objects.get(user=user,
                                                               group=group,
                                                               name__iexact=name)
                    existing.append(tag)
                except self.through.tag_model().DoesNotExist:
                    tags_to_create.append(name)
        else:
            # If str_tags has 0 elements Django actually optimizes that to not do a
            # query. Malcolm is very smart.
            existing = self.through.tag_model().objects.filter(
                user=user,
                group=group,
                name__in=str_tags
            )
            tags_to_create = str_tags - set(t.name for t in existing)
        tag_objs.update(existing)
        # Create any tags that did not already exist for this scope.
        for new_tag in tags_to_create:
            tag_objs.add(self.through.tag_model().objects.create(user=user, group=group, name=new_tag))
        # get_or_create keeps re-adding an existing tag idempotent.
        for tag in tag_objs:
            self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
    @require_instance_manager
    def names(self, user, group=None):
        # Flat list of tag names on this instance, scoped to (user, group).
        return self.get_queryset().filter(user=user, group=group).values_list('name', flat=True)
    @require_instance_manager
    def slugs(self, user, group):
        # Flat list of tag slugs on this instance, scoped to (user, group).
        return self.get_queryset().filter(user=user, group=group).values_list('slug', flat=True)
    @require_instance_manager
    def set(self, user, group, *tags):
        # Replace all of this instance's tags for (user, group) with *tags*.
        self.clear(user, group)
        self.add(user, group, *tags)
    @require_instance_manager
    def remove(self, user, group, *tags):
        # Detach the named tags from this instance for (user, group).
        self.through.objects.filter(tag__user=user, tag__group=group).filter(
            **self._lookup_kwargs()).filter(tag__name__in=tags).delete()
    @require_instance_manager
    def clear(self, user, group):
        # NOTE(review): filters user/group directly on the through model,
        # unlike remove() which uses tag__user/tag__group -- confirm the
        # through model really has these fields.
        self.through.objects.filter(user=user, group=group).filter(**self._lookup_kwargs()).delete()
    def most_common(self, user, group):
        # Tags ordered by how many items carry them; uses the legacy
        # get_query_set alias for Django < 1.6 compatibility.
        return self.get_query_set().filter(user=user, group=group).annotate(
            num_times=models.Count(self.through.tag_relname())
        ).order_by('-num_times')
    @require_instance_manager
    def similar_objects(self, user, group):
        """Return objects sharing tags with this instance, most-similar first."""
        lookup_kwargs = self._lookup_kwargs()
        lookup_keys = sorted(lookup_kwargs)
        qs = self.through.objects.filter(tag__user=user, tag__group=group).values(*six.iterkeys(lookup_kwargs))
        qs = qs.annotate(n=models.Count('pk'))
        qs = qs.exclude(**lookup_kwargs)
        qs = qs.filter(tag__in=self.all())
        qs = qs.order_by('-n')
        # TODO: This all feels like a bit of a hack.
        items = {}
        if len(lookup_keys) == 1:
            # Can we do this without a second query by using a select_related()
            # somehow?
            f = _get_field(self.through, lookup_keys[0])
            objs = f.rel.to._default_manager.filter(**{
                "%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
            })
            for obj in objs:
                items[(getattr(obj, f.rel.field_name),)] = obj
        else:
            # Generic relation: batch-load objects grouped by content type
            # to avoid one query per row.
            preload = {}
            for result in qs:
                preload.setdefault(result['content_type'], set())
                preload[result["content_type"]].add(result["object_id"])
            for ct, obj_ids in preload.items():
                ct = ContentType.objects.get_for_id(ct)
                for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
                    items[(ct.pk, obj.pk)] = obj
        results = []
        for result in qs:
            obj = items[
                tuple(result[k] for k in lookup_keys)
            ]
            obj.similar_tags = result["n"]
            results.append(obj)
        return results
    # _TaggableManager needs to be hashable but BaseManagers in Django 1.8+ overrides
    # the __eq__ method which makes the default __hash__ method disappear.
    # This checks if the __hash__ attribute is None, and if so, it reinstates the original method.
    if models.Manager.__hash__ is None:
        __hash__ = object.__hash__
class TaggableManager(RelatedField, Field):
    """Model field providing the tags pseudo-many-to-many relation.

    Also acts as a descriptor: attribute access on a model instance
    returns a ``_TaggableManager`` bound to that instance.
    """
    # Field flags
    many_to_many = True
    many_to_one = False
    one_to_many = False
    one_to_one = False
    _related_name_counter = 0
    def __init__(self, verbose_name=_("Tags"),
                 help_text=_("A comma-separated list of tags."),
                 through=None, blank=False, related_name=None, to=None,
                 manager=_TaggableManager):
        # through defaults to the stock TaggedItem intermediary model.
        self.through = through or TaggedItem
        self.swappable = False
        self.manager = manager
        rel = TaggableRel(self, related_name, self.through, to=to)
        # null/serialize are forced here and stripped again in deconstruct().
        Field.__init__(
            self,
            verbose_name=verbose_name,
            help_text=help_text,
            blank=blank,
            null=True,
            serialize=False,
            rel=rel,
        )
        # NOTE: `to` is ignored, only used via `deconstruct`.
    def __get__(self, instance, model):
        # Descriptor protocol: hand back a manager bound to the instance
        # (or to the class when accessed on the model itself).
        if instance is not None and instance.pk is None:
            raise ValueError("%s objects need to have a primary key value "
                             "before you can access their tags." % model.__name__)
        manager = self.manager(
            through=self.through,
            model=model,
            instance=instance,
            prefetch_cache_name=self.name
        )
        return manager
    def deconstruct(self):
        """
        Deconstruct the object, used with migrations.
        """
        name, path, args, kwargs = super(TaggableManager, self).deconstruct()
        # Remove forced kwargs.
        for kwarg in ('serialize', 'null'):
            del kwargs[kwarg]
        # Add arguments related to relations.
        # Ref: https://github.com/alex/django-taggit/issues/206#issuecomment-37578676
        if isinstance(self.rel.through, six.string_types):
            kwargs['through'] = self.rel.through
        elif not self.rel.through._meta.auto_created:
            kwargs['through'] = "%s.%s" % (self.rel.through._meta.app_label, self.rel.through._meta.object_name)
        if isinstance(self.rel.to, six.string_types):
            kwargs['to'] = self.rel.to
        else:
            kwargs['to'] = '%s.%s' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
        return name, path, args, kwargs
    def contribute_to_class(self, cls, name):
        """Register this field on model class *cls* under *name*."""
        if VERSION < (1, 7):
            self.name = self.column = self.attname = name
        else:
            self.set_attributes_from_name(name)
        self.model = cls
        cls._meta.add_field(self)
        setattr(cls, name, self)
        if not cls._meta.abstract:
            # Resolve string references ("app.Model") lazily, since the
            # target may not be loaded yet.
            if isinstance(self.rel.to, six.string_types):
                def resolve_related_class(field, model, cls):
                    field.rel.to = model
                add_lazy_relation(cls, self, self.rel.to, resolve_related_class)
            if isinstance(self.through, six.string_types):
                def resolve_related_class(field, model, cls):
                    self.through = model
                    self.rel.through = model
                    self.post_through_setup(cls)
                add_lazy_relation(
                    cls, self, self.through, resolve_related_class
                )
            else:
                self.post_through_setup(cls)
    def get_internal_type(self):
        # Present as an M2M to Django's field machinery.
        return 'ManyToManyField'
    def __lt__(self, other):
        """
        Required contribute_to_class as Django uses bisect
        for ordered class contribution and bisect requires
        a orderable type in py3.
        """
        return False
    def post_through_setup(self, cls):
        """Finish wiring once the through model is known/resolved."""
        if RelatedObject is not None:  # Django < 1.8
            self.related = RelatedObject(cls, self.model, self)
        # use_gfk: the through model links via contenttypes generic FK.
        self.use_gfk = (
            self.through is None or issubclass(self.through, GenericTaggedItemBase)
        )
        if not self.rel.to:
            # Default the target model to the through model's tag FK target.
            self.rel.to = self.through._meta.get_field("tag").rel.to
        if RelatedObject is not None:  # Django < 1.8
            self.related = RelatedObject(self.through, cls, self)
        if self.use_gfk:
            tagged_items = GenericRelation(self.through)
            tagged_items.contribute_to_class(cls, 'tagged_items')
        # Guard against two managers sharing one through model on a class.
        for rel in cls._meta.local_many_to_many:
            if rel == self or not isinstance(rel, TaggableManager):
                continue
            if rel.through == self.through:
                raise ValueError('You can\'t have two TaggableManagers with the'
                                 ' same through model.')
    def save_form_data(self, instance, value):
        # Called by ModelForm: replace the tag set with the form value.
        getattr(instance, self.name).set(*value)
    def formfield(self, form_class=TagField, **kwargs):
        """Build the form field (a TagField) for this model field."""
        defaults = {
            "label": capfirst(self.verbose_name),
            "help_text": self.help_text,
            "required": not self.blank
        }
        defaults.update(kwargs)
        return form_class(**defaults)
    def value_from_object(self, instance):
        # Through-model rows for *instance*; empty queryset when unsaved.
        if instance.pk:
            return self.through.objects.filter(**self.through.lookup_kwargs(instance))
        return self.through.objects.none()
    def related_query_name(self):
        return _model_name(self.model)
    def m2m_reverse_name(self):
        return _get_field(self.through, 'tag').column
    def m2m_reverse_field_name(self):
        return _get_field(self.through, 'tag').name
    def m2m_target_field_name(self):
        return self.model._meta.pk.name
    def m2m_reverse_target_field_name(self):
        return self.rel.to._meta.pk.name
    def m2m_column_name(self):
        # For the generic case the FK column lives on the virtual GFK field.
        if self.use_gfk:
            return self.through._meta.virtual_fields[0].fk_field
        return self.through._meta.get_field('content_object').column
    def db_type(self, connection=None):
        # No database column of its own.
        return None
    def m2m_db_table(self):
        return self.through._meta.db_table
    def bulk_related_objects(self, new_objs, using):
        # Nothing to cascade on bulk operations.
        return []
    def extra_filters(self, pieces, pos, negate):
        """Extra contenttype filters for reverse lookups (GFK case only)."""
        if negate or not self.use_gfk:
            return []
        prefix = "__".join(["tagged_items"] + pieces[:pos - 2])
        get = ContentType.objects.get_for_model
        cts = [get(obj) for obj in _get_subclasses(self.model)]
        if len(cts) == 1:
            return [("%s__content_type" % prefix, cts[0])]
        return [("%s__content_type__in" % prefix, cts)]
    def get_extra_join_sql(self, connection, qn, lhs_alias, rhs_alias):
        """Extra WHERE SQL restricting joins to this model's content types.

        (Legacy join API; superseded by get_extra_restriction below.)
        """
        model_name = _model_name(self.through)
        # Pick whichever side of the join is the through table.
        if rhs_alias == '%s_%s' % (self.through._meta.app_label, model_name):
            alias_to_join = rhs_alias
        else:
            alias_to_join = lhs_alias
        extra_col = _get_field(self.through, 'content_type').column
        content_type_ids = [ContentType.objects.get_for_model(subclass).pk for
                            subclass in _get_subclasses(self.model)]
        if len(content_type_ids) == 1:
            content_type_id = content_type_ids[0]
            extra_where = " AND %s.%s = %%s" % (qn(alias_to_join),
                                                qn(extra_col))
            params = [content_type_id]
        else:
            extra_where = " AND %s.%s IN (%s)" % (qn(alias_to_join),
                                                  qn(extra_col),
                                                  ','.join(['%s'] *
                                                           len(content_type_ids)))
            params = content_type_ids
        return extra_where, params
    # This and all the methods till the end of class are only used in django >= 1.6
    def _get_mm_case_path_info(self, direct=False):
        # Join path when the through model uses a direct FK (non-generic).
        pathinfos = []
        linkfield1 = _get_field(self.through, 'content_object')
        linkfield2 = _get_field(self.through, self.m2m_reverse_field_name())
        if direct:
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info()
        else:
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info()
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos
    def _get_gfk_case_path_info(self, direct=False):
        # Join path when the through model uses a contenttypes generic FK.
        pathinfos = []
        from_field = self.model._meta.pk
        opts = self.through._meta
        object_id_field = _get_field(self.through, 'object_id')
        linkfield = _get_field(self.through, self.m2m_reverse_field_name())
        if direct:
            join1infos = [PathInfo(self.model._meta, opts, [from_field], self.rel, True, False)]
            join2infos = linkfield.get_path_info()
        else:
            join1infos = linkfield.get_reverse_path_info()
            join2infos = [PathInfo(opts, self.model._meta, [object_id_field], self, True, False)]
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos
    def get_path_info(self):
        if self.use_gfk:
            return self._get_gfk_case_path_info(direct=True)
        else:
            return self._get_mm_case_path_info(direct=True)
    def get_reverse_path_info(self):
        if self.use_gfk:
            return self._get_gfk_case_path_info(direct=False)
        else:
            return self._get_mm_case_path_info(direct=False)
    def get_joining_columns(self, reverse_join=False):
        # (lhs_column, rhs_column) pairs joining model PK to object_id.
        if reverse_join:
            return ((self.model._meta.pk.column, "object_id"),)
        else:
            return (("object_id", self.model._meta.pk.column),)
    def get_extra_restriction(self, where_class, alias, related_alias):
        # Restrict joined rows to this model hierarchy's content types.
        extra_col = _get_field(self.through, 'content_type').column
        content_type_ids = [ContentType.objects.get_for_model(subclass).pk
                            for subclass in _get_subclasses(self.model)]
        return ExtraJoinRestriction(related_alias, extra_col, content_type_ids)
    def get_reverse_joining_columns(self):
        return self.get_joining_columns(reverse_join=True)
    @property
    def related_fields(self):
        return [(_get_field(self.through, 'object_id'), self.model._meta.pk)]
    @property
    def foreign_related_fields(self):
        return [self.related_fields[0][1]]
def _get_subclasses(model):
    """Return *model* plus every model inheriting from it via a parent link."""
    subclasses = [model]
    if VERSION < (1, 8):
        all_fields = (_get_field(model, fname)
                      for fname in model._meta.get_all_field_names())
    else:
        all_fields = model._meta.get_fields()
    for field in all_fields:
        if RelatedObject is None:
            # Django 1.8+: parent links appear as reverse one-to-one rels.
            if (isinstance(field, OneToOneRel) and
                    getattr(field.field.rel, "parent_link", None)):
                subclasses.extend(_get_subclasses(field.related_model))
        elif (isinstance(field, RelatedObject) and
                getattr(field.field.rel, "parent_link", None)):
            # Django < 1.8: parent links appear as RelatedObject wrappers.
            subclasses.extend(_get_subclasses(field.model))
    return subclasses
# `total_ordering` does not exist in Django 1.4, as such
# we special case this import to be py3k specific which
# is not supported by Django 1.4
# (TaggableManager only defines __lt__; total_ordering derives the
# remaining rich comparisons required for sorting under py3.)
if six.PY3:
    from django.utils.functional import total_ordering
    TaggableManager = total_ordering(TaggableManager)
| |
# -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.finalizer.tagging_s3
======================================
s3 tagging image finalizer
"""
import logging
from shutil import rmtree
from os.path import isdir
from os import makedirs, system
from os import environ
from aminator.config import conf_action
from aminator.exceptions import VolumeException
from aminator.plugins.finalizer.tagging_base import TaggingBaseFinalizerPlugin
from aminator.util import randword
from aminator.util.linux import sanitize_metadata, monitor_command
from aminator.util.metrics import cmdsucceeds, cmdfails, timer
__all__ = ('TaggingS3FinalizerPlugin',)
log = logging.getLogger(__name__)
class TaggingS3FinalizerPlugin(TaggingBaseFinalizerPlugin):
    """Finalizer producing an S3-backed (instance-store) AMI.

    Pipeline (see finalize()): dd-copy the volume to a local image file,
    ec2-bundle-image it, ec2-upload-bundle the result to S3, register the
    AMI from the uploaded manifest, then tag it.
    """
    _name = 'tagging_s3'
    def add_plugin_args(self):
        # Extend the base tagging argument group with S3-bundling flags;
        # each one lands in the shared context.ami config namespace.
        tagging = super(TaggingS3FinalizerPlugin, self).add_plugin_args()
        context = self._config.context
        tagging.add_argument('-n', '--name', dest='name', action=conf_action(context.ami), help='name of resultant AMI (default package_name-version-release-arch-yyyymmddHHMM-s3')
        tagging.add_argument('--cert', dest='cert', action=conf_action(context.ami), help='The path to the PEM encoded RSA public key certificate file for ec2-bundle-volume')
        tagging.add_argument('--privatekey', dest='privatekey', action=conf_action(context.ami), help='The path to the PEM encoded RSA private key file for ec2-bundle-vol')
        tagging.add_argument('--ec2-user', dest='ec2_user', action=conf_action(context.ami), help='ec2 user id for ec2-bundle-vol')
        tagging.add_argument('--tmpdir', dest='tmpdir', action=conf_action(context.ami), help='temp directory used by ec2-bundle-vol')
        tagging.add_argument('--bucket', dest='bucket', action=conf_action(context.ami), help='the S3 bucket to use for ec2-upload-bundle')
        tagging.add_argument('--break-copy-volume', dest='break_copy_volume', action=conf_action(context.ami, action='store_true'), help='break into shell after copying the volume, for debugging')
    def _set_metadata(self):
        # Derive the final AMI name (explicit --name or name_format
        # template) and suffix it with '-s3' to mark the store type.
        super(TaggingS3FinalizerPlugin, self)._set_metadata()
        context = self._config.context
        config = self._config.plugins[self.full_name]
        metadata = context.package.attributes
        ami_name = context.ami.get('name', None)
        if not ami_name:
            ami_name = config.name_format.format(**metadata)
        context.ami.name = sanitize_metadata('{0}-s3'.format(ami_name))
    def tmpdir(self):
        # Working directory for the bundle: <tmpdir>/<ami name>.
        config = self._config.plugins[self.full_name]
        ami = self._config.context.ami
        return "{0}/{1}".format(ami.get("tmpdir", config.get("default_tmpdir", "/tmp")), ami.name)
    # pylint: disable=access-member-before-definition
    def unique_name(self):
        # Memoized: the random suffix must stay stable across calls so the
        # bundle, upload and register steps refer to the same name.
        context = self._config.context
        if hasattr(self, "_unique_name"):
            return self._unique_name
        self._unique_name = "{0}-{1}".format(context.ami.name, randword(6))
        return self._unique_name
    def image_location(self):
        # Local path of the raw image file produced by _copy_volume.
        return "{0}/{1}".format(self.tmpdir(), self.unique_name())
    @cmdsucceeds("aminator.finalizer.tagging_s3.copy_volume.count")
    @cmdfails("aminator.finalizer.tagging_s3.copy_volume.error")
    @timer("aminator.finalizer.tagging_s3.copy_volume.duration")
    def _copy_volume(self):
        """dd the attached volume device into a local raw image file."""
        context = self._config.context
        tmpdir = self.tmpdir()
        if not isdir(tmpdir):
            makedirs(tmpdir)
        return monitor_command(["dd", "bs=65536", "if={0}".format(context.volume.dev), "of={0}".format(self.image_location())])
    @cmdsucceeds("aminator.finalizer.tagging_s3.bundle_image.count")
    @cmdfails("aminator.finalizer.tagging_s3.bundle_image.error")
    @timer("aminator.finalizer.tagging_s3.bundle_image.duration")
    def _bundle_image(self):
        """Run ec2-bundle-image over the copied raw image."""
        context = self._config.context
        config = self._config.plugins[self.full_name]
        block_device_map = config.default_block_device_map
        root_device = config.default_root_device
        # Build the -B block-device mapping: root, ephemeral pairs, ami.
        bdm = "root={0}".format(root_device)
        for bd in block_device_map:
            bdm += ",{0}={1}".format(bd[1], bd[0])
        bdm += ",ami={0}".format(root_device)
        cmd = ['ec2-bundle-image']
        cmd.extend(['-c', context.ami.get("cert", config.default_cert)])
        cmd.extend(['-k', context.ami.get("privatekey", config.default_privatekey)])
        cmd.extend(['-u', context.ami.get("ec2_user", str(config.default_ec2_user))])
        cmd.extend(['-i', self.image_location()])
        cmd.extend(['-d', self.tmpdir()])
        if context.base_ami.architecture:
            cmd.extend(['-r', context.base_ami.architecture])
        # Paravirtual AMIs inherit kernel/ramdisk from the base AMI.
        vm_type = context.ami.get("vm_type", "paravirtual")
        if vm_type == "paravirtual":
            if context.base_ami.kernel_id:
                cmd.extend(['--kernel', context.base_ami.kernel_id])
            if context.base_ami.ramdisk_id:
                cmd.extend(['--ramdisk', context.base_ami.ramdisk_id])
            cmd.extend(['-B', bdm])
        return monitor_command(cmd)
    @cmdsucceeds("aminator.finalizer.tagging_s3.upload_bundle.count")
    @cmdfails("aminator.finalizer.tagging_s3.upload_bundle.error")
    @timer("aminator.finalizer.tagging_s3.upload_bundle.duration")
    def _upload_bundle(self):
        """Upload the bundle manifest (and parts) to S3 with ec2-upload-bundle."""
        context = self._config.context
        # Credentials come from the cloud plugin's active connection.
        provider = self._cloud._connection.provider
        ak = provider.get_access_key()
        sk = provider.get_secret_key()
        tk = provider.get_security_token()
        cmd = ['ec2-upload-bundle']
        cmd.extend(['-b', context.ami.bucket])
        cmd.extend(['-a', ak])
        cmd.extend(['-s', sk])
        if tk:
            cmd.extend(['-t', tk])
        cmd.extend(['-m', "{0}.manifest.xml".format(self.image_location())])
        cmd.extend(['--retry'])
        return monitor_command(cmd)
    def _register_image(self):
        """Register the AMI from the uploaded S3 manifest. Returns bool."""
        context = self._config.context
        log.info('Registering image')
        if not self._cloud.register_image(manifest="{0}/{1}.manifest.xml".format(context.ami.bucket, self.unique_name())):
            return False
        log.info('Registration success')
        return True
    def finalize(self):
        """Run the full copy -> bundle -> upload -> register -> tag pipeline.

        Returns True on success, False on the first failing step.
        """
        log.info('Finalizing image')
        context = self._config.context
        self._set_metadata()
        ret = self._copy_volume()
        if not ret.success:
            log.debug('Error copying volume, failure:{0.command} :{0.std_err}'.format(ret.result))
            return False
        if context.ami.get('break_copy_volume', False):
            # Debug hook: drop into a shell after the volume copy.
            system("bash")
        ret = self._bundle_image()
        if not ret.success:
            log.debug('Error bundling image, failure:{0.command} :{0.std_err}'.format(ret.result))
            return False
        ret = self._upload_bundle()
        if not ret.success:
            log.debug('Error uploading bundled volume, failure:{0.command} :{0.std_err}'.format(ret.result))
            return False
        if not self._register_image():
            log.critical('Error registering image')
            return False
        if not self._add_tags(['ami']):
            log.critical('Error adding tags')
            return False
        log.info('Image registered and tagged')
        self._log_ami_metadata()
        return True
    def __enter__(self):
        # Validate the requested root volume size (S3-backed AMIs are
        # capped) and export AMINATOR_* env vars for child processes.
        context = self._config.context
        volume_size = context.ami.get('root_volume_size', None)
        if volume_size is None:
            volume_size = self._cloud.plugin_config.get('root_volume_size', None)
        if volume_size is not None:
            volume_size = int(volume_size)
            if volume_size > int(self.plugin_config.max_root_volume_size):
                raise VolumeException(
                    'Requested root volume size {} exceeds 10G maximum for '
                    'S3-backed AMIs'.format(volume_size))
        environ["AMINATOR_STORE_TYPE"] = "s3"
        if context.ami.get("name", None):
            environ["AMINATOR_AMI_NAME"] = context.ami.name
        if context.ami.get("cert", None):
            environ["AMINATOR_CERT"] = context.ami.cert
        if context.ami.get("privatekey", None):
            environ["AMINATOR_PRIVATEKEY"] = context.ami.privatekey
        if context.ami.get("ec2_user", None):
            environ["AMINATOR_EC2_USER"] = context.ami.ec2_user
        if context.ami.get("tmpdir", None):
            environ["AMINATOR_TMPDIR"] = context.ami.tmpdir
        if context.ami.get("bucket", None):
            environ["AMINATOR_BUCKET"] = context.ami.bucket
        return super(TaggingS3FinalizerPlugin, self).__enter__()
    def __exit__(self, exc_type, exc_value, trace):
        # Best-effort cleanup; never suppresses the original exception.
        if exc_type:
            log.debug('Exception encountered in tagging s3 finalizer context manager',
                      exc_info=(exc_type, exc_value, trace))
        # delete tmpdir used by ec2-bundle-vol
        try:
            td = self.tmpdir()
            if isdir(td):
                rmtree(td)
        except Exception:
            log.debug('Exception encountered attempting to clean s3 bundle tmpdir',
                      exc_info=True)
        return False
| |
import threading
try:
import Queue as queue
except ImportError:
import queue
from collections import deque
from time import sleep, time
from xmlreader import XMLReader
import helpers
from helpers import Struct
from math import sqrt
import sys
import pose
import simobject
import supervisor
import gc
# Loop state machine values (held in PCLoop.__state).
PAUSE = 0
RUN = 1
# RUN_ONCE = 2 # This loop cannot run once
DRAW_ONCE = 3
class PCLoop(threading.Thread):
    """The PCLoop manages the connection between an external robot and a locally running
    supervisor. It also tries to draw a part of the world, using the supplied *renderer*.
    This loop only supports one robot per world file and no obstacles.
    The simulator runs in a separate thread. None of its functions are thread-safe,
    and should never be called directly from other objects (except for the functions
    inherited from `threading.Thread`). The communication with the simulator
    should be done through its *in_queue* and *out_queue*. See :ref:`ui-sim-queue`.
    :param renderer: The renderer that will be used to draw the world.
                     The simulator will assume control of the renderer.
                     The renderer functions also have to be considered thread-unsafe.
    :type renderer: :class:`~renderer.Renderer`
    :param in_queue: The queue that is used to send events to the simulator.
    :type in_queue: :class:`Queue.Queue`
    """

    # Palette for robot colors; only the first entry is used since this loop
    # supports a single robot per world file.
    __nice_colors = (0x55AAEE, 0x66BB22, 0xFFBB22, 0xCC66AA,
                     0x77CCAA, 0xFF7711, 0xFF5555, 0x55CC88)

    def __init__(self, renderer, in_queue):
        """Create a simulator with *renderer* and *in_queue*
        """
        super(PCLoop, self).__init__()

        # Attributes
        self.__stop = False                 # set by stop() to end run()
        self.__state = PAUSE                # PAUSE / RUN / DRAW_ONCE
        self.__renderer = renderer
        self.__center_on_robot = False
        self.__orient_on_robot = False
        self.__show_sensors = True
        self.__draw_supervisors = False
        self.__show_tracks = True

        self.__in_queue = in_queue
        self._out_queue = queue.Queue()

        # Zoom on scene - Move to read_config later
        self.__time = 0.0

        # World objects
        self.__robot = None
        self.__tracker = None
        self.__supervisor = None
        self.__background = []
        self.__zoom_default = 1

        self.__world = None
        self.__log_queue = deque()

    def read_config(self, filename):
        '''Load in the objects from the world XML file '''
        self.log('reading initial configuration')
        try:
            self.__world = XMLReader(filename, 'simulation').read()
        except Exception as e:
            raise Exception('[PCLoop.read_config] Failed to parse ' + filename \
                + ': ' + str(e))
        else:
            self.__supervisor_param_cache = None
            self.__center_on_robot = False
            if self.__robot is not None:
                # Drop references to the old robot and supervisor so their
                # objects can be reclaimed before the world is rebuilt.
                # Fixed: the previous code referenced `r` after `del r`
                # (NameError) and called gc.collect(r) with an object, but
                # gc.collect() only accepts a generation number.
                self.__robot = None
                self.__supervisor = None
                gc.collect()
            self.__construct_world()

    def __construct_world(self):
        """Creates objects previously loaded from the world xml file.
        This function uses the world in ``self.__world``.
        All the objects will be created anew, including robots and supervisors.
        All of the user's code is reloaded.
        """
        if self.__world is None:
            return

        helpers.unload_user_modules()

        self.__state = DRAW_ONCE

        # Drop any previously constructed objects.
        # Fixed: the old code did `del self.tracker`, but the attribute is the
        # name-mangled `self.__tracker`, so every rebuild raised AttributeError.
        self.__robot = None
        self.__supervisor = None
        self.__tracker = None
        self.__background = []

        for thing in self.__world:
            if thing.type == 'robot' and self.__robot is None:
                try:
                    robot_class = helpers.load_by_name(thing.robot.type, 'robots')
                    if thing.robot.options is not None:
                        self.__robot = robot_class(thing.robot.pose, options=Struct(thing.robot.options))
                    else:
                        self.__robot = robot_class(thing.robot.pose)
                    self.__robot.set_logqueue(self.__log_queue)
                    if thing.robot.color is not None:
                        self.__robot.set_color(thing.robot.color)
                    else:
                        self.__robot.set_color(self.__nice_colors[0])

                    # Create supervisor
                    sup_class = helpers.load_by_name(thing.supervisor.type, 'supervisors')

                    info = self.__robot.get_info()
                    info.color = self.__robot.get_color()
                    if thing.supervisor.options is not None:
                        self.__supervisor = sup_class(thing.robot.pose, info, options=Struct(thing.supervisor.options))
                    else:
                        self.__supervisor = sup_class(thing.robot.pose, info)
                    self.__supervisor.set_logqueue(self.__log_queue)
                    name = "Robot {}".format(sup_class.__name__)
                    if self.__supervisor_param_cache is not None:
                        self.__supervisor.set_parameters(self.__supervisor_param_cache)
                    self._out_queue.put(("make_param_window",
                                         (self.__robot, name,
                                          self.__supervisor.get_ui_description())))

                    # Create trackers
                    self.__tracker = simobject.Path(thing.robot.pose, self.__robot.get_color())
                except:
                    # Clean up partially constructed objects, then re-raise so
                    # the caller sees the original error (bare except is
                    # intentional: cleanup must run for any failure).
                    self.log("[PCLoop.construct_world] Robot creation failed!")
                    if self.__robot is not None:
                        self.__robot = None
                    self.__supervisor = None
                    gc.collect()
                    raise
                    #raise Exception('[PCLoop.construct_world] Unknown robot type!')
            elif thing.type == 'marker':
                if thing.polygon.color is None:
                    thing.polygon.color = 0x00FF00
                self.__background.append(
                    simobject.Polygon(thing.polygon.pose,
                                      thing.polygon.points,
                                      thing.polygon.color))
            else:
                raise Exception('[PCLoop.construct_world] Unknown object: '
                                + str(thing.type))

        self.__time = 0.0
        if self.__robot is None:
            raise Exception('[PCLoop.construct_world] No robot specified!')
        else:
            self.__recalculate_default_zoom()
            if not self.__center_on_robot:
                self.focus_on_world()
            self.__supervisor_param_cache = None

        self.__state = DRAW_ONCE
        self._out_queue.put(('reset', ()))

    def __recalculate_default_zoom(self):
        """Calculate the zoom level that will show the robot at about 10% its size
        """
        xmin, ymin, xmax, ymax = self.__robot.get_bounds()
        maxsize = sqrt(float(xmax - xmin) ** 2 + float(ymax - ymin) ** 2)
        if maxsize == 0:
            self.__zoom_default = 1
        else:
            self.__zoom_default = max(self.__renderer.size) / maxsize / 10

    def __reset_world(self):
        """Resets the world and objects to starting position.
        All the user's code will be reloaded.
        """
        if self.__world is None:
            return
        if self.__supervisor is not None:
            # Preserve supervisor parameters across the reset.
            self.__supervisor_param_cache = self.__supervisor.get_parameters()
            self.__supervisor = None
        if self.__robot is not None:
            self.__robot = None
        self.__construct_world()

    def run(self):
        """Start the thread. In the beginning there's no world, no obstacles
        and no robots.
        The simulator will try to draw the world undependently of the
        simulation status, so that the commands from the UI get processed.
        """
        self.log('starting simulator thread')

        time_constant = 0.02  # 20 milliseconds

        self.__renderer.clear_screen()  # create a white screen
        self.__update_view()

        self.__time = time()
        while not self.__stop:
            try:
                self.__process_queue()

                if self.__state == RUN:
                    # self.__time += time_constant
                    self.__robot.update_external_info()
                    self.fwd_logqueue()

                    new_time = time()
                    # Now calculate supervisor outputs for the new position
                    inputs = self.__supervisor.execute(self.__robot.get_info(), new_time - self.__time)
                    self.__time = new_time
                    self.fwd_logqueue()
                    self.__robot.set_inputs(inputs)
                    self.fwd_logqueue()
                    self.__robot.set_pose(self.__supervisor.pose_est)
                    self.__tracker.add_point(self.__supervisor.pose_est)
                    self.fwd_logqueue()
                else:
                    # Idle: throttle the loop so the UI queue is still served.
                    sleep(time_constant)

                # Draw to buffer-bitmap
                if self.__state != PAUSE:
                    self.__draw()
                    if self.__state == DRAW_ONCE:
                        self.pause_simulation()

                self.fwd_logqueue()

            except RuntimeError as e:
                self.log(str(e))
            except Exception:
                # Forward the full traceback to the UI and stop stepping.
                self._out_queue.put(("exception", sys.exc_info()))
                self.pause_simulation()
                self.fwd_logqueue()

    def __draw(self):
        """Draws the world and items in it.
        This will draw the markers, the obstacles,
        the robots, their tracks and their sensors
        """
        if self.__robot is not None and self.__center_on_robot:
            # Temporary fix - center onto robot
            if self.__orient_on_robot:
                self.__renderer.set_screen_center_pose(self.__robot.get_pose())
            else:
                self.__renderer.set_screen_center_pose(pose.Pose(self.__robot.get_pose().x, self.__robot.get_pose().y, 0.0))

        self.__renderer.clear_screen()

        if self.__draw_supervisors and self.__supervisor is not None:
            self.__supervisor.draw_background(self.__renderer)

        for bg_object in self.__background:
            bg_object.draw(self.__renderer)

        # Draw the robot, tracker and sensors after obstacles
        if self.__show_tracks and self.__tracker is not None:
            self.__tracker.draw(self.__renderer)
        if self.__robot is not None:
            self.__robot.draw(self.__renderer)
            if self.__show_sensors:
                self.__robot.draw_sensors(self.__renderer)

        if self.__draw_supervisors and self.__supervisor is not None:
            self.__supervisor.draw_foreground(self.__renderer)

        # update view
        self.__update_view()

    def __update_view(self):
        """Signal the UI that the drawing process is finished,
        and it is safe to access the renderer.
        """
        self._out_queue.put(('update_view', ()))
        self._out_queue.join()  # wait until drawn

    def __draw_once(self):
        # Request a single redraw without (re)starting the simulation.
        if self.__state == PAUSE:
            self.__state = DRAW_ONCE

    def refresh(self):
        """Request a one-shot redraw of the world."""
        self.__draw_once()

    def focus_on_world(self):
        """Scale the view to include all of the world (including robots)"""
        def include_bounds(bounds, o_bounds):
            # Union of two (left, bottom, right, top) rectangles.
            xl, yb, xr, yt = bounds
            xlo, ybo, xro, yto = o_bounds
            if xlo < xl: xl = xlo
            if xro > xr: xr = xro
            if ybo < yb: yb = ybo
            if yto > yt: yt = yto
            return xl, yb, xr, yt

        def bloat_bounds(bounds, factor):
            # Grow a rectangle around its center by *factor*.
            xl, yb, xr, yt = bounds
            w = xr - xl
            h = yt - yb
            factor = (factor - 1) / 2.0
            return xl - w * factor, yb - h * factor, xr + w * factor, yt + h * factor

        self.__center_on_robot = False
        bounds = self.__robot.get_bounds()
        for bgobject in self.__background:
            bounds = include_bounds(bounds, bgobject.get_bounds())
        xl, yb, xr, yt = bounds
        self.__renderer.set_view_rect(xl, yb, xr - xl, yt - yb)
        self.__draw_once()

    def focus_on_robot(self, rotate=True):
        """Center the view on the robot and follow it.
        If *rotate* is true, also follow the robot's orientation.
        """
        self.__center_on_robot = True
        self.__orient_on_robot = rotate
        self.__draw_once()

    def show_sensors(self, show=True):
        """Show or hide the robots' sensors on the simulation view
        """
        self.__show_sensors = show
        self.__draw_once()

    def show_tracks(self, show=True):
        """Show/hide tracks for every robot on simulator view"""
        self.__show_tracks = show
        self.__draw_once()

    def show_supervisors(self, show=True):
        """Show/hide the information from the supervisors"""
        self.__draw_supervisors = show
        self.__draw_once()

    def show_grid(self, show=True):
        """Show/hide gridlines on simulator view"""
        self.__renderer.show_grid(show)
        self.__draw_once()

    def adjust_zoom(self, factor):
        """Zoom the view by *factor*"""
        self.__renderer.set_zoom_level(self.__zoom_default * factor)
        self.__draw_once()

    def apply_parameters(self, robot, parameters):
        """Apply *parameters* to the supervisor of *robot*.
        The parameters have to correspond to the requirements of the supervisor,
        as specified in :meth:`supervisor.Supervisor.get_ui_description`
        """
        if self.__robot == robot:
            self.__supervisor.set_parameters(parameters)
            self.__draw_once()
        else:
            self.log("Robot not found")

    # Stops the thread
    def stop(self):
        """Stop the simulator thread when the entire program is closed"""
        self.log('stopping simulator thread')
        self.__stop = True
        self._out_queue.put(('stopped', ()))

    def start_simulation(self):
        """Start/continue the simulation"""
        if self.__robot is not None:
            self.__robot.resume()
            self.__state = RUN
            self._out_queue.put(('running', ()))

    def pause_simulation(self):
        """Pause the simulation"""
        if self.__robot is not None:
            self.__robot.pause()
        self.__state = PAUSE
        self._out_queue.put(('paused', ()))

    def reset_simulation(self):
        """Reset the simulation to the start position"""
        if self.__robot is not None:
            self.__robot.reset()
        self.__state = DRAW_ONCE
        self.__reset_world()

    ### FIXME Those two functions are not thread-safe
    def get_time(self):
        """Get the internal simulator time."""
        return time() - self.__time

    def is_running(self):
        """Get the simulation state as a `bool`"""
        return self.__state == RUN
    ###------------------

    def __process_queue(self):
        """Process external calls
        """
        while not self.__in_queue.empty():
            tpl = self.__in_queue.get()
            if isinstance(tpl, tuple) and len(tpl) == 2:
                name, args = tpl
                # Dispatch by method name; only methods defined directly on
                # this class are callable through the queue.
                if name in self.__class__.__dict__:
                    try:
                        self.__class__.__dict__[name](self, *args)
                    except TypeError:
                        self.log("Wrong simulator event parameters {}{}".format(name, args))
                        self._out_queue.put(("exception", sys.exc_info()))
                    except Exception:
                        self._out_queue.put(("exception", sys.exc_info()))
                else:
                    self.log("Unknown simulator event '{}'".format(name))
            else:
                self.log("Wrong simulator event format '{}'".format(tpl))
            self.__in_queue.task_done()

    def log(self, message, obj=None):
        """Print *message* locally and forward it to the UI log queue."""
        if obj is None:
            obj = self
        print("{}: {}".format(obj.__class__.__name__, message))
        self._out_queue.put(("log", (message, obj.__class__.__name__, None)))

    def fwd_logqueue(self):
        """Drain the robot/supervisor log queue into the UI out-queue,
        annotating each entry with the originating object's color."""
        while self.__log_queue:
            obj, message = self.__log_queue.popleft()

            # Get the color
            color = None
            if isinstance(obj, simobject.SimObject):
                color = obj.get_color()
            elif isinstance(obj, supervisor.Supervisor):
                color = obj.robot_color

            self._out_queue.put(("log", (message, obj.__class__.__name__, color)))

#end class PCLoop
| |
"""Probit regression class and diagnostics."""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import numpy.linalg as la
import scipy.optimize as op
from scipy.stats import norm, chisqprob
import scipy.sparse as SP
import user_output as USER
import summary_output as SUMMARY
from utils import spdot, spbroadcast
__all__ = ["Probit"]
class BaseProbit(object):
    """
    Probit class to do all the computations

    Parameters
    ----------
    x           : array
                  nxk array of independent variables (assumed to be aligned with y)
    y           : array
                  nx1 array of dependent binary variable
    w           : W
                  PySAL weights instance or spatial weights sparse matrix
                  aligned with y
    optim       : string
                  Optimization method.
                  Default: 'newton' (Newton-Raphson).
                  Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem      : string
                  Method to calculate the scale of the marginal effects.
                  Default: 'phimean' (Mean of individual marginal effects)
                  Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter     : int
                  Maximum number of iterations until optimizer stops

    Attributes
    ----------
    x           : array
                  Two dimensional array with n rows and one column for each
                  independent (exogenous) variable, including the constant
    y           : array
                  nx1 array of dependent variable
    betas       : array
                  kx1 array with estimated coefficients
    predy       : array
                  nx1 array of predicted y values
    n           : int
                  Number of observations
    k           : int
                  Number of variables
    vm          : array
                  Variance-covariance matrix (kxk)
    z_stat      : list of tuples
                  z statistic; each tuple contains the pair (statistic,
                  p-value), where each is a float
    xmean       : array
                  Mean of the independent variables (kx1)
    predpc      : float
                  Percent of y correctly predicted
    logl        : float
                  Log-Likelihhod of the estimation
    scalem      : string
                  Method to calculate the scale of the marginal effects.
    scale       : float
                  Scale of the marginal effects.
    slopes      : array
                  Marginal effects of the independent variables (k-1x1)
                  Note: Disregards the presence of dummies.
    slopes_vm   : array
                  Variance-covariance matrix of the slopes (k-1xk-1)
    LR          : tuple
                  Likelihood Ratio test of all coefficients = 0
                  (test statistics, p-value)
    Pinkse_error: float
                  Lagrange Multiplier test against spatial error correlation.
                  Implemented as presented in [Pinkse2004]_
    KP_error    : float
                  Moran's I type test against spatial error correlation.
                  Implemented as presented in [Kelejian2001]_
    PS_error    : float
                  Lagrange Multiplier test against spatial error correlation.
                  Implemented as presented in [Pinkse1998]_
    warning     : boolean
                  if True Maximum number of iterations exceeded or gradient
                  and/or function calls not changing.

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T
    >>> x = np.hstack((np.ones(y.shape),x))
    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    >>> w.transform='r'
    >>> model = BaseProbit((y>40).astype(float), x, w=w)
    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])

    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])

    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]
    """

    def __init__(self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100):
        self.y = y
        self.x = x
        self.n, self.k = x.shape
        self.optim = optim
        self.scalem = scalem
        self.w = w
        self.maxiter = maxiter
        par_est, self.warning = self.par_est()
        self.betas = np.reshape(par_est[0], (self.k, 1))
        self.logl = -float(par_est[1])

    # --- caching machinery --------------------------------------------------
    # Every derived quantity below is computed once and memoized in
    # self._cache. The cache is created lazily because __init__ triggers
    # property access (via par_est -> hessian callers) before any cache
    # exists. These three helpers replace the copy-pasted
    # try/except AttributeError/except KeyError boilerplate that previously
    # appeared in every property.

    def _ensure_cache(self):
        """Return self._cache, creating the dict on first use."""
        try:
            return self._cache
        except AttributeError:
            self._cache = {}
            return self._cache

    def _cached(self, name, compute):
        """Return the cached value for *name*, computing it with the
        zero-argument callable *compute* if it is not cached yet."""
        cache = self._ensure_cache()
        if name not in cache:
            cache[name] = compute()
        return cache[name]

    def _set_cached(self, name, val):
        """Store *val* in the cache under *name* (used by the setters)."""
        self._ensure_cache()[name] = val

    def _z_stats(self, values, std_err):
        """Pair each values/std_err ratio with its two-sided normal p-value."""
        zStat = values.reshape(len(values),) / std_err
        rs = {}
        for i in range(len(values)):
            rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
        return rs.values()

    # --- cached properties --------------------------------------------------

    @property
    def vm(self):
        # Variance-covariance matrix: negative inverse of the hessian at betas.
        return self._cached('vm', lambda: -la.inv(self.hessian(self.betas)))

    @vm.setter
    def vm(self, val):
        self._set_cached('vm', val)

    @property
    def z_stat(self):
        # z statistics and two-sided p-values for the coefficients.
        return self._cached(
            'z_stat',
            lambda: self._z_stats(self.betas, np.sqrt(self.vm.diagonal())))

    @z_stat.setter
    def z_stat(self, val):
        self._set_cached('z_stat', val)

    @property
    def slopes_std_err(self):
        return self._cached(
            'slopes_std_err', lambda: np.sqrt(self.slopes_vm.diagonal()))

    @slopes_std_err.setter
    def slopes_std_err(self, val):
        self._set_cached('slopes_std_err', val)

    @property
    def slopes_z_stat(self):
        # z statistics and two-sided p-values for the marginal effects.
        return self._cached(
            'slopes_z_stat',
            lambda: self._z_stats(self.slopes, self.slopes_std_err))

    @slopes_z_stat.setter
    def slopes_z_stat(self, val):
        self._set_cached('slopes_z_stat', val)

    def _compute_xmean(self):
        try:  # dense x sums directly; sparse x needs .toarray() first
            return np.reshape(sum(self.x) / self.n, (self.k, 1))
        except:
            return np.reshape(sum(self.x).toarray() / self.n, (self.k, 1))

    @property
    def xmean(self):
        return self._cached('xmean', self._compute_xmean)

    @xmean.setter
    def xmean(self, val):
        self._set_cached('xmean', val)

    @property
    def xb(self):
        # Linear predictor x'beta.
        return self._cached('xb', lambda: spdot(self.x, self.betas))

    @xb.setter
    def xb(self, val):
        self._set_cached('xb', val)

    @property
    def predy(self):
        # Predicted probabilities Phi(x'beta).
        return self._cached('predy', lambda: norm.cdf(self.xb))

    @predy.setter
    def predy(self, val):
        self._set_cached('predy', val)

    def _compute_predpc(self):
        # Percent of observations correctly predicted (|y - predy| <= 0.5).
        predpc = abs(self.y - self.predy)
        for i in range(len(predpc)):
            if predpc[i] > 0.5:
                predpc[i] = 0
            else:
                predpc[i] = 1
        return float(100.0 * np.sum(predpc) / self.n)

    @property
    def predpc(self):
        return self._cached('predpc', self._compute_predpc)

    @predpc.setter
    def predpc(self, val):
        self._set_cached('predpc', val)

    @property
    def phiy(self):
        # Normal density phi(x'beta).
        return self._cached('phiy', lambda: norm.pdf(self.xb))

    @phiy.setter
    def phiy(self, val):
        self._set_cached('phiy', val)

    def _compute_scale(self):
        # scalem must be 'phimean' or 'xmean'; any other value yields None.
        if self.scalem == 'phimean':
            return float(1.0 * np.sum(self.phiy) / self.n)
        elif self.scalem == 'xmean':
            return float(norm.pdf(np.dot(self.xmean.T, self.betas)))

    @property
    def scale(self):
        return self._cached('scale', self._compute_scale)

    @scale.setter
    def scale(self, val):
        self._set_cached('scale', val)

    @property
    def slopes(self):
        # Marginal effects, excluding the constant term.
        return self._cached('slopes', lambda: self.betas[1:] * self.scale)

    @slopes.setter
    def slopes(self, val):
        self._set_cached('slopes', val)

    def _compute_slopes_vm(self):
        # Delta-method variance of the marginal effects, constant dropped.
        x = self.xmean
        b = self.betas
        dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
        slopes_vm = (self.scale ** 2) * \
            np.dot(np.dot(dfdb, self.vm), dfdb.T)
        return slopes_vm[1:, 1:]

    @property
    def slopes_vm(self):
        return self._cached('slopes_vm', self._compute_slopes_vm)

    @slopes_vm.setter
    def slopes_vm(self, val):
        self._set_cached('slopes_vm', val)

    def _compute_LR(self):
        # Likelihood ratio test against the constant-only model.
        P = 1.0 * np.sum(self.y) / self.n
        LR = float(
            -2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
        return (LR, chisqprob(LR, self.k))

    @property
    def LR(self):
        return self._cached('LR', self._compute_LR)

    @LR.setter
    def LR(self, val):
        self._set_cached('LR', val)

    @property
    def u_naive(self):
        # Naive residuals y - Phi(x'beta).
        return self._cached('u_naive', lambda: self.y - self.predy)

    @u_naive.setter
    def u_naive(self, val):
        self._set_cached('u_naive', val)

    def _compute_u_gen(self):
        # Generalized residuals (standardized by Phi(1-Phi)).
        Phi_prod = self.predy * (1 - self.predy)
        return self.phiy * (self.u_naive / Phi_prod)

    @property
    def u_gen(self):
        return self._cached('u_gen', self._compute_u_gen)

    @u_gen.setter
    def u_gen(self, val):
        self._set_cached('u_gen', val)

    def _compute_sp_tests(self):
        """Compute and cache all three spatial-dependence tests in one pass."""
        cache = self._ensure_cache()
        cache['Pinkse_error'], cache[
            'KP_error'], cache['PS_error'] = sp_tests(self)

    @property
    def Pinkse_error(self):
        cache = self._ensure_cache()
        if 'Pinkse_error' not in cache:
            self._compute_sp_tests()
        return cache['Pinkse_error']

    @Pinkse_error.setter
    def Pinkse_error(self, val):
        self._set_cached('Pinkse_error', val)

    @property
    def KP_error(self):
        cache = self._ensure_cache()
        if 'KP_error' not in cache:
            self._compute_sp_tests()
        return cache['KP_error']

    @KP_error.setter
    def KP_error(self, val):
        self._set_cached('KP_error', val)

    @property
    def PS_error(self):
        cache = self._ensure_cache()
        if 'PS_error' not in cache:
            self._compute_sp_tests()
        return cache['PS_error']

    @PS_error.setter
    def PS_error(self, val):
        self._set_cached('PS_error', val)

    # --- estimation ---------------------------------------------------------

    def par_est(self):
        """Estimate the coefficients with the configured optimizer.

        Returns the optimizer's result tuple and a boolean warning flag
        (True when the optimizer hit its iteration limit or stalled).
        """
        # OLS starting values.
        start = np.dot(la.inv(spdot(self.x.T, self.x)),
                       spdot(self.x.T, self.y))
        flogl = lambda par: -self.ll(par)
        if self.optim == 'newton':
            fgrad = lambda par: self.gradient(par)
            fhess = lambda par: self.hessian(par)
            par_hat = newton(flogl, start, fgrad, fhess, self.maxiter)
            warn = par_hat[2]
        else:
            fgrad = lambda par: -self.gradient(par)
            if self.optim == 'bfgs':
                par_hat = op.fmin_bfgs(
                    flogl, start, fgrad, full_output=1, disp=0)
                warn = par_hat[6]
            if self.optim == 'ncg':
                fhess = lambda par: -self.hessian(par)
                par_hat = op.fmin_ncg(
                    flogl, start, fgrad, fhess=fhess, full_output=1, disp=0)
                warn = par_hat[5]
        if warn > 0:
            warn = True
        else:
            warn = False
        return par_hat, warn

    def ll(self, par):
        """Log-likelihood of the probit model at parameter vector *par*."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        qxb = q * spdot(self.x, beta)
        ll = sum(np.log(norm.cdf(qxb)))
        return ll

    def gradient(self, par):
        """Gradient (score) of the log-likelihood at *par*."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        qxb = q * spdot(self.x, beta)
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        gradient = spdot(lamb.T, self.x)[0]
        return gradient

    def hessian(self, par):
        """Hessian of the log-likelihood at *par*."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        xb = spdot(self.x, beta)
        qxb = q * xb
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        hessian = spdot(self.x.T, spbroadcast(self.x, -lamb * (lamb + xb)))
        return hessian
class Probit(BaseProbit):
"""
Classic non-spatial Probit and spatial diagnostics. The class includes a
printout that formats all the results and tests in a nice format.
The diagnostics for spatial dependence currently implemented are:
* Pinkse Error [Pinkse2004]_
* Kelejian and Prucha Moran's I [Kelejian2001]_
* Pinkse & Slade Error [Pinkse1998]_
Parameters
----------
x : array
nxk array of independent variables (assumed to be aligned with y)
y : array
nx1 array of dependent binary variable
w : W
PySAL weights instance aligned with y
optim : string
Optimization method.
Default: 'newton' (Newton-Raphson).
Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
scalem : string
Method to calculate the scale of the marginal effects.
Default: 'phimean' (Mean of individual marginal effects)
Alternative: 'xmean' (Marginal effects at variables mean)
maxiter : int
Maximum number of iterations until optimizer stops
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
y : array
nx1 array of dependent variable
betas : array
kx1 array with estimated coefficients
predy : array
nx1 array of predicted y values
n : int
Number of observations
k : int
Number of variables
vm : array
Variance-covariance matrix (kxk)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xmean : array
Mean of the independent variables (kx1)
predpc : float
Percent of y correctly predicted
logl : float
Log-Likelihhod of the estimation
scalem : string
Method to calculate the scale of the marginal effects.
scale : float
Scale of the marginal effects.
slopes : array
Marginal effects of the independent variables (k-1x1)
slopes_vm : array
Variance-covariance matrix of the slopes (k-1xk-1)
LR : tuple
Likelihood Ratio test of all coefficients = 0
(test statistics, p-value)
Pinkse_error: float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in [Pinkse2004]_
KP_error : float
Moran's I type test against spatial error correlation.
Implemented as presented in [Kelejian2001]_
PS_error : float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in [Pinkse1998]_
warning : boolean
if True Maximum number of iterations exceeded or gradient
and/or function calls not changing.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the CRIME column (crime) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept. Since we want to run a probit model and for this
example we use the Columbus data, we also need to transform the continuous
CRIME variable into a binary variable. As in [McMillen1992]_, we define
y = 1 if CRIME > 40.
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> y = (y>40).astype(float)
Extract HOVAL (home values) and INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> names_to_extract = ['INC', 'HOVAL']
>>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T
Since we want to the test the probit model for spatial dependence, we need to
specify the spatial weights matrix that includes the spatial configuration of
the observations into the error component of the model. To do that, we can open
an already existing gal file or create a new one. In this case, we will use
``columbus.gal``, which contains contiguity relationships between the
observations in the Columbus dataset we are using throughout this example.
Note that, in order to read the file, not only to open it, we need to
append '.read()' at the end of the command.
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. In PySAL, this
can be easily performed in the following way:
>>> w.transform='r'
We are all set with the preliminaries, we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = Probit(y, x, w=w, name_y='crime', name_x=['income','home value'], name_ds='columbus', name_w='columbus.gal')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them.
>>> np.around(model.betas, decimals=6)
array([[ 3.353811],
[-0.199653],
[-0.029514]])
>>> np.around(model.vm, decimals=6)
array([[ 0.852814, -0.043627, -0.008052],
[-0.043627, 0.004114, -0.000193],
[-0.008052, -0.000193, 0.00031 ]])
Since we have provided a spatial weigths matrix, the diagnostics for
spatial dependence have also been computed. We can access them and their
p-values individually:
>>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
>>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
>>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
>>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
[['Pinkse_error' '3.131719' '0.076783']
['KP_error' '1.721312' '0.085194']
['PS_error' '2.558166' '0.109726']]
Or we can easily obtain a full summary of all the results nicely formatted and
ready to be printed simply by typing 'print model.summary'
"""
def __init__(
    self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100,
    vm=False, name_y=None, name_x=None, name_w=None, name_ds=None,
    spat_diag=False):
    """
    Set up and run a classic probit estimation.

    Validates the inputs, estimates the model through BaseProbit and
    attaches the display names used by the summary output.  Providing a
    weights matrix ``w`` switches on the spatial-dependence diagnostics.
    """
    n = USER.check_arrays(y, x)
    USER.check_y(y, n)
    # Identity comparison with None is the correct test for an omitted
    # weights matrix; '!= None' would delegate to W's rich comparison.
    if w is not None:
        USER.check_weights(w, y)
        # A supplied weights matrix implies spatial diagnostics are wanted.
        spat_diag = True
        ws = w.sparse
    else:
        ws = None
    x_constant = USER.check_constant(x)
    BaseProbit.__init__(self, y=y, x=x_constant, w=ws,
                        optim=optim, scalem=scalem, maxiter=maxiter)
    self.title = "CLASSIC PROBIT ESTIMATOR"
    self.name_ds = USER.set_name_ds(name_ds)
    self.name_y = USER.set_name_y(name_y)
    self.name_x = USER.set_name_x(name_x, x)
    self.name_w = USER.set_name_w(name_w, w)
    SUMMARY.Probit(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def newton(flogl, start, fgrad, fhess, maxiter):
    """
    Calculates the Newton-Raphson method

    Parameters
    ----------
    flogl       : lambda
                  Function to calculate the log-likelihood
    start       : array
                  kx1 array of starting values
    fgrad       : lambda
                  Function to calculate the gradient
    fhess       : lambda
                  Function to calculate the hessian
    maxiter     : int
                  Maximum number of iterations until optimizer stops
    """
    params = start
    n_iter = 0
    step_size = 1
    # Iterate until the quadratic-form step size falls below tolerance
    # or the iteration budget is exhausted.
    while n_iter < maxiter and step_size >= 1e-04:
        neg_hess_inv = -la.inv(fhess(params))
        grad = fgrad(params).reshape(start.shape)
        update = np.dot(neg_hess_inv, grad)
        params = params + update
        step_size = np.dot(grad.T, update)
        n_iter += 1
    # Flag non-convergence when the loop stopped on the iteration cap.
    warn = 1 if n_iter == maxiter else 0
    logl = flogl(params)
    return (params, logl, warn)
def sp_tests(reg):
    """
    Calculates tests for spatial dependence in Probit models

    Parameters
    ----------
    reg         : regression object
                  output instance from a probit model

    Returns
    -------
    LM_err, moran, ps : arrays of [statistic, p-value] for the Pinkse,
                        Kelejian-Prucha and Pinkse-Slade error tests.
    """
    # 'is not None' avoids delegating to W's rich comparison operators.
    if reg.w is not None:
        # Accept either a W object (exposes .sparse) or an already-sparse
        # matrix; only a missing attribute is tolerated here so genuine
        # errors are no longer silently swallowed by a bare except.
        try:
            w = reg.w.sparse
        except AttributeError:
            w = reg.w
        Phi = reg.predy
        phi = reg.phiy
        # Pinkse_error:
        Phi_prod = Phi * (1 - Phi)
        u_naive = reg.u_naive
        u_gen = reg.u_gen
        sig2 = np.sum((phi * phi) / Phi_prod) / reg.n
        LM_err_num = np.dot(u_gen.T, (w * u_gen)) ** 2
        trWW = np.sum((w * w).diagonal())
        trWWWWp = trWW + np.sum((w * w.T).diagonal())
        LM_err = float(1.0 * LM_err_num / (sig2 ** 2 * trWWWWp))
        LM_err = np.array([LM_err, chisqprob(LM_err, 1)])
        # KP_error:
        moran = moran_KP(reg.w, u_naive, Phi_prod)
        # Pinkse-Slade_error:
        u_std = u_naive / np.sqrt(Phi_prod)
        ps_num = np.dot(u_std.T, (w * u_std)) ** 2
        trWpW = np.sum((w.T * w).diagonal())
        ps = float(ps_num / (trWW + trWpW))
        # chi-square instead of bootstrap.
        ps = np.array([ps, chisqprob(ps, 1)])
    else:
        # Parenthesized raise is valid in both Python 2 and 3; the old
        # 'raise Exception, "..."' form is a SyntaxError under Python 3.
        raise Exception("W matrix must be provided to calculate spatial tests.")
    return LM_err, moran, ps
def moran_KP(w, u, sig2i):
    """
    Calculates Moran-flavoured tests

    Parameters
    ----------
    w           : W
                  PySAL weights instance aligned with y
    u           : array
                  nx1 array of naive residuals
    sig2i       : array
                  nx1 array of individual variance

    Returns
    -------
    moran       : array of [statistic, two-sided normal p-value]
    """
    # Accept either a W object (exposes .sparse) or an already-sparse
    # matrix.  Narrowed from a bare 'except:' so real failures surface.
    try:
        w = w.sparse
    except AttributeError:
        pass
    moran_num = np.dot(u.T, (w * u))
    # Diagonal matrix of individual variances: LIL for cheap diagonal
    # assignment, then CSR for the sparse products below.
    E = SP.lil_matrix(w.get_shape())
    E.setdiag(sig2i.flat)
    E = E.asformat('csr')
    WE = w * E
    moran_den = np.sqrt(np.sum((WE * WE + (w.T * E) * WE).diagonal()))
    moran = float(1.0 * moran_num / moran_den)
    # Two-sided p-value under the standard normal distribution.
    moran = np.array([moran, norm.sf(abs(moran)) * 2.])
    return moran
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    _test()

    import numpy as np
    import pysal

    # Rebuild the doctest example: regress a binary CRIME indicator on
    # income and home value with row-standardized contiguity weights.
    dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([dbf.by_col('CRIME')]).T
    var_x = ['INC', 'HOVAL']
    x = np.array([dbf.by_col(name) for name in var_x]).T
    w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    w.transform = 'r'
    # name_w corrected: the weights come from columbus.gal, not the dbf.
    probit1 = Probit(
        (y > 40).astype(float), x, w=w, name_x=var_x, name_y="CRIME",
        name_ds="Columbus", name_w="columbus.gal")
    # print() call form works under both Python 2 and Python 3.
    print(probit1.summary)
| |
import copy
import dominoes
import itertools
import random
def _randomized_hands():
    '''
    :return: 4 hands, obtained by shuffling the 28 dominoes used in
             this variation of the game, and distributing them evenly
    '''
    # Full double-six set: one domino per unordered value pair (i, j).
    stock = [dominoes.Domino(i, j) for i in range(7) for j in range(i, 7)]
    random.shuffle(stock)
    # Deal seven consecutive dominoes to each of the four players.
    return [dominoes.Hand(stock[start:start + 7]) for start in range(0, 28, 7)]
def _validate_player(player):
'''
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.
:param int player: player to be validated
:return: None
:raises NoSuchPlayerException: if the player is invalid
'''
valid_players = range(4)
if player not in valid_players:
valid_players = ', '.join(str(p) for p in valid_players)
raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players'
' are: {}'.format(player, valid_players))
def _domino_hand(d, hands):
'''
:param Domino d: domino to find within the hands
:param list hands: hands to find domino in
:return: index of the hand that contains the specified domino
:raises NoSuchDominoException: if no hand contains the specified domino
'''
for i, hand in enumerate(hands):
if d in hand:
return i
raise dominoes.NoSuchDominoException('{} is not in any hand!'.format(d))
def _remaining_points(hands):
'''
:param list hands: hands for which to compute the remaining points
:return: a list indicating the amount of points
remaining in each of the input hands
'''
points = []
for hand in hands:
points.append(sum(d.first + d.second for d in hand))
return points
def _validate_hands(hands, missing):
'''
Validates hands, based on values that
are supposed to be missing from them.
:param list hands: list of Hand objects to validate
:param list missing: list of sets that indicate the values
that are supposed to be missing from
the respective Hand objects
:return: True if no Hand objects contain values that they
are supposed to be missing; False otherwise
'''
for h, m in zip(hands, missing):
for value in m:
if dominoes.hand.contains_value(h, value):
return False
return True
def _all_possible_partitionings(elements, sizes):
'''
Helper function for Game.all_possible_hands(). Given a set of elements
and the sizes of partitions, yields all possible partitionings of the
elements into partitions of the provided sizes.
:param set elements: a set of elements to partition.
:param list sizes: a list of sizes for the partitions. The sum of the
sizes should equal the length of the set of elements.
:yields: a tuple of tuples, each inner tuple corresponding to a partition.
'''
try:
# get the size of the current partition
size = sizes[0]
except IndexError:
# base case: no more sizes left
yield ()
return
# don't include the current size in the recursive calls
sizes = sizes[1:]
# iterate over all possible partitions of the current size
for partition in itertools.combinations(elements, size):
# recursive case: pass down the remaining elements and the remaining sizes
for other_partitions in _all_possible_partitionings(elements.difference(partition), sizes):
# put results together and yield up
yield (partition,) + other_partitions
def next_player(player):
    '''
    Returns the player that plays after the specified player.

    :param int player: player for which to calculate the
                       next player. Must be 0, 1, 2, or 3.
    :return: the next player
    '''
    # Play proceeds cyclically through the four seats.
    successor = player + 1
    return successor % 4
class Game:
    '''
    Python class for objects that represent a dominoes game.

    This variation of the dominoes game is played
    using 28 dominoes, which use values from 0 to 6:

    .. code-block:: none

        [0|0][0|1][0|2][0|3][0|4][0|5][0|6]
        [1|1][1|2][1|3][1|4][1|5][1|6]
        [2|2][2|3][2|4][2|5][2|6]
        [3|3][3|4][3|5][3|6]
        [4|4][4|5][4|6]
        [5|5][5|6]
        [6|6]

    These dominoes are shuffled, and distributed evenly among
    4 players. These players then sit on the edges of a square.
    Players sitting opposite of each other are on the same team,
    and the center of the square is the game board. Throughout
    the game, each player will only be able to see their hand,
    the game board, and the amount of dominoes left in the hands
    of the other players. Note that no player can see the values
    on the dominoes in the hands of the other players.

    The 4 players will then take turns placing dominoes from their
    hands onto the game board. The game board consists of a chain
    of dominoes placed end to end such that the values on connected
    ends always match.

    Prior to distributing the dominoes, the 4 players will agree on
    which player will play first, either by designating a specific
    player or a specific domino that must be played first (often [6|6]).
    After the game starts, play proceeds clockwise.

    If a player is able to place a domino on the board, he/she must.
    Only if they have no possible moves, can they pass on their turn.

    The game ends either when a player runs out of dominoes or when no
    player can play a domino (in which case we say the game is stuck).
    If a player runs out of dominoes, his/her team will earn a number
    of points computed by adding all the values of all the dominoes
    remaining in the hands of the 3 other players.

    If the game is stuck, each team will add up all the values of
    all the dominoes remaining in their hands. The team with the
    lower score wins, and earns a number of points computed by
    adding both teams' scores. If both teams have the same score,
    the game is declared a tie, and neither team earns any points.

    :var board: the game board
    :var hands: a list containing each player's hand
    :var moves: a list of the moves that have been played. Moves are
                represented by a tuple of Domino and bool. The domino
                indicates the domino that was played, and the bool
                indicates on what end of the board the domino was
                played (True for left, False for right). If the player
                passed, the move is None.
    :var turn: the player whose turn it is
    :var valid_moves: a tuple of valid moves for the player whose turn it is.
                      Moves are represented in the same way as in the moves list.
    :var starting_player: first player to make a move
    :var result: None if the game is in progress; otherwise a
                 Result object indicating the outcome of the game

    .. code-block:: python

        >>> import dominoes
        >>> d = dominoes.Domino(6, 6)
        >>> g = dominoes.Game.new(starting_domino=d)
        >>> g
        Board: [6|6]
        Player 0's hand: [2|4][5|5][2|3][1|3][1|6][1|2]
        Player 1's hand: [1|1][3|4][0|5][0|6][2|5][1|5][2|6]
        Player 2's hand: [0|4][0|3][4|4][3|6][0|2][4|5][1|4]
        Player 3's hand: [5|6][3|5][3|3][0|0][0|1][2|2][4|6]
        Player 1's turn
        >>> g.board
        [6|6]
        >>> g.hands
        [[2|4][5|5][2|3][1|3][1|6][1|2], [1|1][3|4][0|5][0|6][2|5][1|5][2|6], [0|4][0|3][4|4][3|6][0|2][4|5][1|4], [5|6][3|5][3|3][0|0][0|1][2|2][4|6]]
        >>> g.turn
        1
        >>> g.result
        >>> g.valid_moves # True is for the left of the board, False is for the right
        [([0|6], True), ([2|6], True)]
        >>> g.make_move(*g.valid_moves[0])
        >>> g.moves
        [([6|6], True), ([0|6], True)]
        >>> g
        Board: [0|6][6|6]
        Player 0's hand: [2|4][5|5][2|3][1|3][1|6][1|2]
        Player 1's hand: [1|1][3|4][0|5][2|5][1|5][2|6]
        Player 2's hand: [0|4][0|3][4|4][3|6][0|2][4|5][1|4]
        Player 3's hand: [5|6][3|5][3|3][0|0][0|1][2|2][4|6]
        Player 2's turn
        >>> g.make_move(*g.valid_moves[0])
        ...
        >>> g.make_move(*g.valid_moves[0])
        Result(player=1, won=True, points=-32)
        >>> g.result
        Result(player=1, won=True, points=-32)
        >>> g
        Board: [2|6][6|3][3|4][4|1][1|1][1|6][6|4][4|5][5|2][2|4][4|0][0|6][6|6][6|5][5|0][0|3][3|5][5|5][5|1][1|0]
        Player 0's hand: [2|3][1|3][1|2]
        Player 1's hand:
        Player 2's hand: [4|4][0|2]
        Player 3's hand: [3|3][0|0][2|2]
        Player 1 won and scored 32 points!
    '''
    def __init__(self, board, hands, moves, turn,
                 valid_moves, starting_player, result):
        # Records a fully-formed game state; see the class docstring for
        # the meaning of each attribute. Use Game.new() to start a game.
        self.board = board
        self.hands = hands
        self.moves = moves
        self.turn = turn
        self.valid_moves = valid_moves
        self.starting_player = starting_player
        self.result = result

    @classmethod
    def new(cls, starting_domino=None, starting_player=0):
        '''
        :param Domino starting_domino: the domino that should be played
                                       to start the game. The player
                                       with this domino in their hand
                                       will play first.
        :param int starting_player: the player that should play first.
                                    This value is ignored if a starting
                                    domino is provided. Players are
                                    referred to by their indexes: 0, 1,
                                    2, and 3. 0 and 2 are on one team,
                                    and 1 and 3 are on another team.
        :return: a new game, initialized according to
                 starting_domino and starting_player
        :raises NoSuchDominoException: if starting_domino is invalid
        :raises NoSuchPlayerException: if starting_player is invalid
        '''
        board = dominoes.Board()
        hands = _randomized_hands()
        moves = []
        result = None
        if starting_domino is None:
            # No forced first domino: the designated player starts and may
            # play anything from their hand (board is empty, so only the
            # 'left' placement is meaningful).
            _validate_player(starting_player)
            valid_moves = tuple((d, True) for d in hands[starting_player])
            game = cls(board, hands, moves, starting_player,
                       valid_moves, starting_player, result)
        else:
            # Forced first domino: whoever holds it starts, and the first
            # move is played immediately.
            starting_player = _domino_hand(starting_domino, hands)
            valid_moves = ((starting_domino, True),)
            game = cls(board, hands, moves, starting_player,
                       valid_moves, starting_player, result)
            game.make_move(*valid_moves[0])
        return game

    def skinny_board(self):
        '''
        Converts the board representation used by this game from a regular
        Board to a less descriptive but more memory efficient SkinnyBoard.

        :return: None
        '''
        self.board = dominoes.SkinnyBoard.from_board(self.board)

    def _update_valid_moves(self):
        '''
        Updates self.valid_moves according to the latest game state.
        Assumes that the board and all hands are non-empty.
        '''
        left_end = self.board.left_end()
        right_end = self.board.right_end()
        moves = []
        for d in self.hands[self.turn]:
            if left_end in d:
                moves.append((d, True))
            # do not double count moves if both of the board's ends have
            # the same value, and a domino can be placed on both of them
            if right_end in d and left_end != right_end:
                moves.append((d, False))
        self.valid_moves = tuple(moves)

    def make_move(self, d, left):
        '''
        Plays a domino from the hand of the player whose turn it is onto one
        end of the game board. If the game does not end, the turn is advanced
        to the next player who has a valid move.

        Making a move is transactional - if the operation fails at any point,
        the game will return to its state before the operation began.

        :param Domino d: domino to be played
        :param bool left: end of the board on which to play the
                          domino (True for left, False for right)
        :return: a Result object if the game ends; None otherwise
        :raises GameOverException: if the game has already ended
        :raises NoSuchDominoException: if the domino to be played is not in
                                       the hand of the player whose turn it is
        :raises EndsMismatchException: if the domino cannot be placed on
                                       the specified position in the board
        '''
        if self.result is not None:
            raise dominoes.GameOverException('Cannot make a move - the game is over!')
        # remove the domino from the hand first; remember its position so
        # it can be restored if placing it on the board fails
        i = self.hands[self.turn].play(d)
        try:
            self.board.add(d, left)
        except dominoes.EndsMismatchException as error:
            # return the domino to the hand if it cannot be placed on the board
            self.hands[self.turn].draw(d, i)
            raise error
        # record the move
        self.moves.append((d, left))
        # check if the game ended due to a player running out of dominoes
        if not self.hands[self.turn]:
            self.valid_moves = ()
            # pow(-1, turn) gives team 0/2 positive points and team 1/3
            # negative points, matching the sign convention used in __str__
            self.result = dominoes.Result(
                self.turn, True, pow(-1, self.turn) * sum(_remaining_points(self.hands))
            )
            return self.result
        # advance the turn to the next player with a valid move.
        # if no player has a valid move, the game is stuck. also,
        # record all the passes.
        passes = []
        stuck = True
        for _ in self.hands:
            self.turn = next_player(self.turn)
            self._update_valid_moves()
            if self.valid_moves:
                self.moves.extend(passes)
                stuck = False
                break
            else:
                passes.append(None)
        if stuck:
            # stuck game: the team with fewer remaining points wins the
            # sum of both teams' points; a tie scores zero
            player_points = _remaining_points(self.hands)
            team_points = [player_points[0] + player_points[2],
                           player_points[1] + player_points[3]]
            if team_points[0] < team_points[1]:
                self.result = dominoes.Result(self.turn, False, sum(team_points))
            elif team_points[0] == team_points[1]:
                self.result = dominoes.Result(self.turn, False, 0)
            else:
                self.result = dominoes.Result(self.turn, False, -sum(team_points))
            return self.result

    def missing_values(self):
        '''
        Computes the values that must be missing from each
        player's hand, based on when they have passed.

        :return: a list of sets, each one containing the
                 values that must be missing from the
                 corresponding player's hand
        '''
        missing = [set() for _ in self.hands]
        # replay the game from the beginning
        board = dominoes.SkinnyBoard()
        player = self.starting_player
        for move in self.moves:
            if move is None:
                # pass - update the missing values
                missing[player].update([board.left_end(), board.right_end()])
            else:
                # not a pass - update the board
                board.add(*move)
            # move on to the next player
            player = next_player(player)
        return missing

    def random_possible_hands(self):
        '''
        Returns random possible hands for all players, given the information
        known by the player whose turn it is. This information includes the
        current player's hand, the sizes of the other players' hands, and the
        moves played by every player, including the passes.

        :return: a list of possible Hand objects, corresponding to each player
        '''
        # compute values that must be missing from
        # each hand, to rule out impossible hands
        missing = self.missing_values()
        # get the dominoes that are in all of the other hands. note that, even
        # though we are 'looking' at the other hands to get these dominoes, we
        # are not 'cheating' because these dominoes could also be computed by
        # subtracting the dominoes that have been played (which are public
        # knowledge) and the dominoes in the current player's hand from the
        # initial set of dominoes
        other_dominoes = [d for p, h in enumerate(self.hands) for d in h if p != self.turn]
        # rejection sampling: draw random hands until a consistent set appears
        while True:
            # generator for a shuffled shallow copy of other_dominoes
            shuffled_dominoes = (d for d in random.sample(other_dominoes, len(other_dominoes)))
            # generate random hands by partitioning the shuffled dominoes according
            # to how many dominoes need to be in each of the other hands. since we
            # know the current player's hand, we just use a shallow copy of it
            hands = []
            for player, hand in enumerate(self.hands):
                if player != self.turn:
                    hand = [next(shuffled_dominoes) for _ in hand]
                hands.append(dominoes.Hand(hand))
            # only return the hands if they are possible, according to the values we
            # know to be missing from each hand. if the hands are not possible, try
            # generating random hands again
            if _validate_hands(hands, missing):
                return hands

    def all_possible_hands(self):
        '''
        Yields all possible hands for all players, given the information
        known by the player whose turn it is. This information includes the
        current player's hand, the sizes of the other players' hands, and the
        moves played by every player, including the passes.

        :yields: a list of possible Hand objects, corresponding to each player
        '''
        # compute values that must be missing from
        # each hand, to rule out impossible hands
        missing = self.missing_values()
        # get the dominoes that are in all of the other hands. note that, even
        # though we are 'looking' at the other hands to get these dominoes, we
        # are not 'cheating' because these dominoes could also be computed by
        # subtracting the dominoes that have been played (which are public
        # knowledge) and the dominoes in the current player's hand from the
        # initial set of dominoes
        other_dominoes = {d for p, h in enumerate(self.hands) for d in h if p != self.turn}
        # get the lengths of all the other hands, so
        # that we know how many dominoes to place in each
        other_hand_lengths = [len(h) for p, h in enumerate(self.hands) if p != self.turn]
        # iterate over all possible hands that the other players might have
        for possible_hands in _all_possible_partitionings(other_dominoes, other_hand_lengths):
            # given possible hands for all players, this is a generator for
            # tuples containing the dominoes that are in the other players' hands
            possible_hands = (h for h in possible_hands)
            # build a list containing possible hands for all players. since we
            # know the current player's hand, we just use a shallow copy of it
            hands = []
            for player, hand in enumerate(self.hands):
                if player != self.turn:
                    hand = next(possible_hands)
                hands.append(dominoes.Hand(hand))
            # only yield the hands if they are possible, according
            # to the values we know to be missing from each hand
            if _validate_hands(hands, missing):
                yield hands

    def __eq__(self, other):
        # Games are equal when every piece of game state is equal.
        if not isinstance(other, type(self)):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other

    def __deepcopy__(self, _):
        # Custom deepcopy: most state is immutable or shallow-copyable,
        # so a full recursive copy would be wasted work.
        if isinstance(self.board, dominoes.SkinnyBoard):
            if self.board:
                # SkinnyBoard attributes are ints; no need to deepcopy
                board = dominoes.SkinnyBoard(self.board.left_end(),
                                             self.board.right_end(),
                                             len(self.board))
            else:
                # board is empty
                board = dominoes.SkinnyBoard()
        else:
            # TODO: optimize for Board class
            board = copy.deepcopy(self.board)
        # only need to copy the Hand, because the Domino objects are
        # immutable. note that using copy.copy does not work because
        # the container of the Domino objects within the Hand also
        # needs to be copied, which the Hand initializer takes care of.
        hands = [dominoes.Hand(hand) for hand in self.hands]
        # list of tuples of Domino and bool; shallow copy is sufficient
        moves = list(self.moves)
        # tuple of immutable Domino objects; no need to deepcopy
        valid_moves = self.valid_moves
        # None or namedtuple of ints and bools; no need to deepcopy
        result = self.result
        # just an int; no need to deepcopy
        turn = self.turn
        # just an int; no need to deepcopy
        starting_player = self.starting_player
        return type(self)(board, hands, moves, turn,
                          valid_moves, starting_player, result)

    def __str__(self):
        # Render the board, every hand, and either whose turn it is or a
        # human-readable description of the game's outcome.
        string_list = ['Board: {}'.format(self.board)]
        for i, hand in enumerate(self.hands):
            string_list.append("Player {}'s hand: {}".format(i, hand))
        if self.result is None:
            string_list.append("Player {}'s turn".format(self.turn))
        else:
            if self.result.won:
                string_list.append(
                    'Player {} won and scored {} points!'.format(self.result.player,
                                                                 abs(self.result.points))
                )
            else:
                # stuck game: the sign of points (relative to the sticking
                # player's team) determines who benefited
                if not self.result.points:
                    string_list.append(
                        'Player {} stuck the game and tied (0 points)!'.format(self.result.player)
                    )
                elif pow(-1, self.result.player) * self.result.points > 0:
                    string_list.append(
                        'Player {} stuck the game and scored {} points!'.format(self.result.player,
                                                                                abs(self.result.points))
                    )
                else:
                    string_list.append(
                        'Player {} stuck the game and scored'
                        ' {} points for the opposing team!'.format(self.result.player,
                                                                   abs(self.result.points))
                    )
        return '\n'.join(string_list)

    def __repr__(self):
        return str(self)
| |
from abc import ABC, abstractmethod
from collections import defaultdict
from datetime import date, datetime, timedelta
from io import StringIO
import logging
import pytz
from typing import Collection, List
from sqlalchemy.orm import Session
from rdr_service.dao.consent_dao import ConsentDao
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.consent_file import ConsentFile as ParsingResult, ConsentSyncStatus, ConsentType,\
ConsentOtherErrors
from rdr_service.model.consent_response import ConsentResponse
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.participant_enums import ParticipantCohort, QuestionnaireStatus
from rdr_service.resource.tasks import dispatch_rebuild_consent_metrics_tasks, dispatch_check_consent_errors_task
from rdr_service.services.consent import files
from rdr_service.storage import GoogleCloudStorageProvider
class ValidationOutputStrategy(ABC):
    """Base class for strategies that collect consent validation results.

    Instances are context managers: results accumulated inside the
    ``with`` block are processed when the block exits.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush whatever has been accumulated when the context closes.
        self.process_results()

    def add_all(self, result_collection: Collection[ParsingResult]):
        # Convenience wrapper: feed every result through add_result.
        for parsing_result in result_collection:
            self.add_result(parsing_result)

    @abstractmethod
    def add_result(self, result: ParsingResult):
        ...

    @abstractmethod
    def process_results(self):
        ...

    @classmethod
    def _build_consent_list_structure(cls):
        # Nested mapping: participant_id -> consent type -> list of results.
        return defaultdict(lambda: defaultdict(list))
class StoreResultStrategy(ValidationOutputStrategy):
    """Output strategy that persists newly seen validation results in batches."""

    def __init__(self, session, consent_dao: ConsentDao, project_id=None):
        self._session = session
        self._results = []
        self._consent_dao = consent_dao
        self._max_batch_count = 500
        self.project_id = project_id

    def add_result(self, result: ParsingResult):
        self._results.append(result)
        # Flush once the pending batch grows past the limit.
        if len(self._results) > self._max_batch_count:
            self.process_results()
            self._results = []

    def _get_existing_results_for_participants(self):
        # Pull every stored result for the participants in the pending batch.
        pending_participant_ids = {result.participant_id for result in self._results}
        return self._consent_dao.get_validation_results_for_participants(
            session=self._session,
            participant_ids=pending_participant_ids
        )

    @classmethod
    def _file_in_collection(cls, file, collection):
        # A file is "in" the collection when any stored result shares its path.
        return any(file.file_path == candidate.file_path for candidate in collection)

    def process_results(self):
        previous_results = self._get_existing_results_for_participants()
        new_results_to_store = _ValidationOutputHelper.get_new_validation_results(
            existing_results=previous_results,
            results_to_filter=self._results
        )
        self._consent_dao.batch_update_consent_files(new_results_to_store, self._session)
        self._session.commit()
        # Rebuild downstream metrics only when something was actually stored.
        if new_results_to_store:
            dispatch_rebuild_consent_metrics_tasks(
                [stored.id for stored in new_results_to_store],
                project_id=self.project_id
            )
class ReplacementStoringStrategy(ValidationOutputStrategy):
    # Output strategy that stores validation results and, when a valid
    # (READY_FOR_SYNC) file arrives for a consent, marks that consent's
    # outstanding NEEDS_CORRECTING records as OBSOLETE.
    def __init__(self, session, consent_dao: ConsentDao, project_id=None):
        self.session = session
        self.consent_dao = consent_dao
        self.participant_ids = set()
        # Nested mapping: participant_id -> consent type -> list of results.
        self.results = self._build_consent_list_structure()
        self._max_batch_count = 500
        self.project_id = project_id

    def add_result(self, result: ParsingResult):
        # Group by participant and consent type so replacement decisions
        # can be made per consent.
        self.results[result.participant_id][result.type].append(result)
        self.participant_ids.add(result.participant_id)
        # Flush in batches to bound memory use and transaction size.
        if len(self.participant_ids) > self._max_batch_count:
            self.process_results()
            self.results = self._build_consent_list_structure()
            self.participant_ids = set()

    def _build_previous_result_map(self):
        # Load all stored validation results for the batched participants,
        # organized participant -> consent type -> results.
        results = self._build_consent_list_structure()
        previous_results = self.consent_dao.get_validation_results_for_participants(
            session=self.session,
            participant_ids=self.participant_ids
        )
        for result in previous_results:
            results[result.participant_id][result.type].append(result)
        return results

    def process_results(self):
        # Store results not seen before; when one of the new results is
        # READY_FOR_SYNC, obsolete the consent's NEEDS_CORRECTING records
        # and store only the valid file.
        organized_previous_results = self._build_previous_result_map()
        results_to_update = []
        for participant_id, consent_type_dict in self.results.items():
            for consent_type, result_list in consent_type_dict.items():
                previous_type_list: Collection[ParsingResult] = organized_previous_results[participant_id][consent_type]
                new_results = _ValidationOutputHelper.get_new_validation_results(
                    existing_results=previous_type_list,
                    results_to_filter=result_list
                )
                if new_results:
                    ready_for_sync = self._find_file_ready_for_sync(result_list)
                    if ready_for_sync:
                        # A valid file supersedes outstanding corrections.
                        for result in previous_type_list:
                            if result.sync_status == ConsentSyncStatus.NEEDS_CORRECTING:
                                result.sync_status = ConsentSyncStatus.OBSOLETE
                                results_to_update.append(result)
                        results_to_update.append(ready_for_sync)
                    else:
                        results_to_update.extend(new_results)
        self.consent_dao.batch_update_consent_files(results_to_update, self.session)
        self.session.commit()
        # Trigger metrics rebuilds only when records actually changed.
        if results_to_update:
            dispatch_rebuild_consent_metrics_tasks([r.id for r in results_to_update], project_id=self.project_id)

    @classmethod
    def _find_file_ready_for_sync(cls, results: List[ParsingResult]):
        # Return the first READY_FOR_SYNC result, or None if there is none.
        for result in results:
            if result.sync_status == ConsentSyncStatus.READY_FOR_SYNC:
                return result
        return None
class UpdateResultStrategy(ReplacementStoringStrategy):
    """Output strategy for revalidation runs: refreshes existing validation
    records in place rather than inserting duplicates.
    """

    def _get_existing_results_for_participants(self):
        # BUG FIX: iterating self.results directly yields its keys
        # (participant ids, plain ints), so 'file.file_path' raised
        # AttributeError. Walk the nested participant -> consent type ->
        # result-list structure to collect the actual result objects.
        file_path_list = [
            result.file_path
            for type_dict in self.results.values()
            for result_list in type_dict.values()
            for result in result_list
        ]
        file_objects: List[ParsingResult] = self.session.query(ParsingResult).filter(
            ParsingResult.file_path.in_(file_path_list)
        ).all()
        return {file.file_path: file for file in file_objects}

    def process_results(self):
        """Refresh stored validation records for each revalidated consent."""
        organized_previous_results = self._build_previous_result_map()
        results_to_build = []
        for participant_id, consent_type_dict in self.results.items():
            for consent_type, result_list in consent_type_dict.items():
                previous_type_list: Collection[ParsingResult] = organized_previous_results[participant_id][consent_type]
                # Set the last_checked time for all the matching validation results
                for previous_result in previous_type_list:
                    previous_result.last_checked = datetime.utcnow()
                ready_for_sync = self._find_file_ready_for_sync(result_list)
                if ready_for_sync:
                    found_in_previous_results = False
                    for previous_result in previous_type_list:
                        if previous_result.file_path == ready_for_sync.file_path:
                            # Same file as before: update the stored record in place.
                            self._update_record(new_result=ready_for_sync, existing_result=previous_result)
                            results_to_build.append(previous_result)
                            found_in_previous_results = True
                        elif previous_result.sync_status == ConsentSyncStatus.NEEDS_CORRECTING:
                            # A newly valid file obsoletes older failing records.
                            previous_result.sync_status = ConsentSyncStatus.OBSOLETE
                            results_to_build.append(previous_result)
                    if not found_in_previous_results:
                        results_to_build.append(ready_for_sync)
                        self.session.add(ready_for_sync)
        self.session.commit()
        if results_to_build:
            dispatch_rebuild_consent_metrics_tasks([r.id for r in results_to_build], project_id=self.project_id)

    @classmethod
    def _update_record(cls, new_result: ParsingResult, existing_result: ParsingResult):
        """Copy validation outcome fields from new_result onto the stored record.

        The sync status is only overwritten when the stored record is not
        already SYNC_COMPLETE, or when the new result demands correction.
        """
        existing_result.file_exists = new_result.file_exists
        existing_result.type = new_result.type
        existing_result.is_signature_valid = new_result.is_signature_valid
        existing_result.is_signing_date_valid = new_result.is_signing_date_valid
        existing_result.signature_str = new_result.signature_str
        existing_result.is_signature_image = new_result.is_signature_image
        existing_result.signing_date = new_result.signing_date
        existing_result.expected_sign_date = new_result.expected_sign_date
        existing_result.file_upload_time = new_result.file_upload_time
        existing_result.other_errors = new_result.other_errors
        if existing_result.sync_status != ConsentSyncStatus.SYNC_COMPLETE \
                or new_result.sync_status == ConsentSyncStatus.NEEDS_CORRECTING:
            existing_result.sync_status = new_result.sync_status
class LogResultStrategy(ValidationOutputStrategy):
    """Output strategy that writes a human-readable validation report to a logger."""

    def __init__(self, logger, verbose, storage_provider: GoogleCloudStorageProvider):
        self.logger = logger
        self.verbose = verbose
        self.storage_provider = storage_provider
        # Nested mapping: participant_id -> consent type -> list of results.
        self.results = self._build_consent_list_structure()

    def add_result(self, result: ParsingResult):
        self.results[result.participant_id][result.type].append(result)

    def process_results(self):
        # Emit one line per result, grouped per participant, as a single
        # log record.
        report_lines = []
        for categories in self.results.values():
            if self.verbose:
                # Blank separator between participants in verbose mode.
                report_lines.append('')
            for result_list in categories.values():
                report_lines.extend(
                    self._line_output_for_validation(result, verbose=self.verbose)
                    for result in result_list
                )
        self.logger.info('\n'.join(report_lines))

    def _line_output_for_validation(self, file: ParsingResult, verbose: bool):
        # Build one report line describing the file and its problems.
        line = StringIO()
        if verbose:
            line.write(f'{str(file.id).ljust(8)} - ')
        line.write(f'P{file.participant_id} - {str(file.type).ljust(10)} ')
        if not file.file_exists:
            line.write('missing file')
        else:
            problems = []
            if not file.is_signature_valid:
                problems.append('invalid signature')
            if not file.is_signing_date_valid:
                problems.append(self._get_date_error_details(file, verbose))
            if file.other_errors is not None:
                problems.append(file.other_errors)
            line.write(', '.join(problems))
        if verbose:
            # Append a signed URL so reviewers can open the file directly.
            line.write(f'\n{self._get_link(file)}')
        return line.getvalue()

    @classmethod
    def _get_date_error_details(cls, file: ParsingResult, verbose: bool = False):
        extra_info = ''
        if verbose and file.signing_date and file.expected_sign_date:
            delta = file.signing_date - file.expected_sign_date
            extra_info = f', diff of {delta.days} days'
        return (f'invalid signing date (expected {file.expected_sign_date} '
                f'but file has {file.signing_date}{extra_info})')

    def _get_link(self, file: ParsingResult):
        # file_path is '<bucket>/<blob path...>'; split it accordingly.
        bucket_name, *name_parts = file.file_path.split('/')
        blob = self.storage_provider.get_blob(
            bucket_name=bucket_name,
            blob_name='/'.join(name_parts)
        )
        return blob.generate_signed_url(datetime.utcnow() + timedelta(hours=2))
class _ValidationOutputHelper:
    """Class for containing generic and reusable code for output strategies"""

    @classmethod
    def get_new_validation_results(cls, existing_results: Collection[ParsingResult],
                                   results_to_filter: Collection[ParsingResult]):
        """
        Checks each validation result in results_to_filter
        and returns a list of results that are not in existing_results
        """
        return [
            candidate for candidate in results_to_filter
            if not cls._is_file_in_collection(candidate, existing_results)
        ]

    @classmethod
    def _is_file_in_collection(cls, file: ParsingResult, file_collection: Collection[ParsingResult]):
        # An existing file is "already known" when its exact path was seen
        # before, or when the participant already has a validated
        # (ready/synced) record of the same consent type.
        if file.file_exists:
            if any(file.file_path == known.file_path for known in file_collection):
                return True
            return any(
                file.type == known.type
                and known.sync_status in (ConsentSyncStatus.READY_FOR_SYNC,
                                          ConsentSyncStatus.SYNC_COMPLETE)
                and file.participant_id == known.participant_id
                for known in file_collection
            )
        # A missing file is matched on participant and consent type alone.
        return any(
            file.type == known.type
            and file.participant_id == known.participant_id
            for known in file_collection
        )
class ConsentValidationController:
    """Top-level coordinator for consent-file validation.

    Loads participant summaries, builds a ConsentValidator per participant,
    and routes the resulting ParsingResults to a ValidationOutputStrategy.
    """

    def __init__(self, consent_dao: ConsentDao, participant_summary_dao: ParticipantSummaryDao,
                 hpo_dao: HPODao, storage_provider: GoogleCloudStorageProvider):
        self.consent_dao = consent_dao
        self.participant_summary_dao = participant_summary_dao
        self.storage_provider = storage_provider
        # Cache the VA HPO id once; validators use it to detect
        # veteran/non-veteran consent-version mismatches.
        self.va_hpo_id = hpo_dao.get_by_name('VA').hpoId

    @classmethod
    def build_controller(cls):
        # Convenience factory wiring up the default DAO/storage implementations.
        return ConsentValidationController(
            consent_dao=ConsentDao(),
            participant_summary_dao=ParticipantSummaryDao(),
            hpo_dao=HPODao(),
            storage_provider=GoogleCloudStorageProvider()
        )

    def check_for_corrections(self, session):
        """Load all of the current consent issues and see if they have been resolved yet"""
        checks_needed = self.consent_dao.get_next_revalidate_batch(session)
        with UpdateResultStrategy(session=session, consent_dao=self.consent_dao) as storage_strategy:
            for participant_id, consent_type in checks_needed:
                participant_summary: ParticipantSummary = self.participant_summary_dao.get_with_session(
                    obj_id=participant_id,
                    session=session
                )
                validator = self._build_validator(participant_summary)
                # Re-run only the validation for the flagged consent type.
                if consent_type == ConsentType.PRIMARY:
                    storage_strategy.add_all(validator.get_primary_validation_results())
                elif consent_type == ConsentType.CABOR:
                    storage_strategy.add_all(validator.get_cabor_validation_results())
                elif consent_type == ConsentType.EHR:
                    storage_strategy.add_all(validator.get_ehr_validation_results())
                elif consent_type == ConsentType.GROR:
                    storage_strategy.add_all(validator.get_gror_validation_results())
                elif consent_type == ConsentType.PRIMARY_UPDATE:
                    storage_strategy.add_all(validator.get_primary_update_validation_results())

    def validate_consent_responses(self, summary: ParticipantSummary, output_strategy: ValidationOutputStrategy,
                                   consent_responses: Collection[ConsentResponse]):
        """Validate the files for each given ConsentResponse and hand results to output_strategy.

        Each result is tagged with the ConsentResponse that triggered it.
        """
        validator = self._build_validator(summary)
        # Dispatch table: consent type -> validator method producing its results.
        validation_method_map = {
            ConsentType.PRIMARY: validator.get_primary_validation_results,
            ConsentType.CABOR: validator.get_cabor_validation_results,
            ConsentType.EHR: validator.get_ehr_validation_results,
            ConsentType.GROR: validator.get_gror_validation_results,
            ConsentType.PRIMARY_UPDATE: validator.get_primary_update_validation_results
        }
        for consent_response in consent_responses:
            get_validation_results_func = validation_method_map[consent_response.type]
            validation_results = self._process_validation_results(
                get_validation_results_func(expected_signing_date=consent_response.response.authored)
            )
            for result in validation_results:
                result.consent_response = consent_response
            output_strategy.add_all(validation_results)

    def validate_participant_consents(self, summary: ParticipantSummary, output_strategy: ValidationOutputStrategy,
                                      min_authored_date: date = None, max_authored_date: date = None,
                                      types_to_validate: Collection[ConsentType] = None):
        """Validate every consent type the participant has submitted.

        Optional authored-date bounds and a type whitelist narrow what gets checked.
        """
        validator = self._build_validator(summary)
        if self._check_consent_type(ConsentType.PRIMARY, types_to_validate) and self._has_consent(
            consent_status=summary.consentForStudyEnrollment,
            authored=summary.consentForStudyEnrollmentFirstYesAuthored,
            min_authored=min_authored_date,
            max_authored=max_authored_date
        ):
            output_strategy.add_all(self._process_validation_results(validator.get_primary_validation_results()))
        if self._check_consent_type(ConsentType.CABOR, types_to_validate) and self._has_consent(
            consent_status=summary.consentForCABoR,
            authored=summary.consentForCABoRAuthored,
            min_authored=min_authored_date,
            max_authored=max_authored_date
        ):
            output_strategy.add_all(self._process_validation_results(validator.get_cabor_validation_results()))
        if self._check_consent_type(ConsentType.EHR, types_to_validate) and self._has_consent(
            consent_status=summary.consentForElectronicHealthRecords,
            authored=summary.consentForElectronicHealthRecordsAuthored,
            min_authored=min_authored_date,
            max_authored=max_authored_date
        ):
            output_strategy.add_all(self._process_validation_results(validator.get_ehr_validation_results()))
        if self._check_consent_type(ConsentType.GROR, types_to_validate) and self._has_consent(
            consent_status=summary.consentForGenomicsROR,
            authored=summary.consentForGenomicsRORAuthored,
            min_authored=min_authored_date,
            max_authored=max_authored_date
        ):
            output_strategy.add_all(self._process_validation_results(validator.get_gror_validation_results()))
        if self._check_consent_type(ConsentType.PRIMARY_UPDATE, types_to_validate) and self._has_primary_update_consent(
            summary=summary,
            min_authored=min_authored_date,
            max_authored=max_authored_date
        ):
            output_strategy.add_all(self._process_validation_results(validator.get_primary_update_validation_results()))

    def validate_consent_uploads(self, session: Session, output_strategy: ValidationOutputStrategy,
                                 min_consent_date=None, max_consent_date=None):
        """
        Find all the expected consents (filtering by dates if provided) and check the files that have been uploaded
        """
        validation_start_time = datetime.utcnow().replace(microsecond=0)
        # Retrieve consent response objects that need to be validated
        participant_id_consent_map = self.consent_dao.get_consent_responses_to_validate(session=session)
        participant_summaries = self.participant_summary_dao.get_by_ids_with_session(
            session=session,
            obj_ids=participant_id_consent_map.keys()
        )
        for summary in participant_summaries:
            self.validate_consent_responses(
                summary=summary,
                output_strategy=output_strategy,
                consent_responses=participant_id_consent_map[summary.participantId]
            )
        # Flush results from the response-driven pass before the legacy pass runs.
        output_strategy.process_results()
        # Use the legacy query for the day that the updated check is released (and in case any are missed)
        summaries_needing_validated = self.consent_dao.get_participants_with_unvalidated_files(session)
        logging.info(f'{len(summaries_needing_validated)} participants still needed validation')
        for summary in summaries_needing_validated:
            self.validate_participant_consents(
                summary=summary,
                output_strategy=output_strategy,
                min_authored_date=min_consent_date,
                max_authored_date=max_consent_date
            )
        # Queue a task to check for new errors to report to PTSC
        dispatch_check_consent_errors_task(validation_start_time)

    def validate_all_for_participant(self, participant_id: int, output_strategy: ValidationOutputStrategy):
        """Validate every submitted consent for a single participant (no date filtering)."""
        summary: ParticipantSummary = self.participant_summary_dao.get(participant_id)
        validator = self._build_validator(summary)
        if self._has_consent(consent_status=summary.consentForStudyEnrollment):
            output_strategy.add_all(validator.get_primary_validation_results())
        if self._has_consent(consent_status=summary.consentForCABoR):
            output_strategy.add_all(validator.get_cabor_validation_results())
        if self._has_consent(consent_status=summary.consentForElectronicHealthRecords):
            output_strategy.add_all(validator.get_ehr_validation_results())
        if self._has_consent(consent_status=summary.consentForGenomicsROR):
            output_strategy.add_all(validator.get_gror_validation_results())
        if self._has_primary_update_consent(summary):
            output_strategy.add_all(validator.get_primary_update_validation_results())

    @classmethod
    def _check_consent_type(cls, consent_type: ConsentType, to_check_list: Collection[ConsentType]):
        # No whitelist means every type should be checked.
        if to_check_list is None:
            return True
        else:
            return consent_type in to_check_list

    @classmethod
    def _process_validation_results(cls, results: List[ParsingResult]):
        # If any file is ready for sync, report only that one; otherwise keep
        # the full list of (presumably failing) results.
        ready_file = cls._find_file_ready_for_sync(results)
        if ready_file:
            return [ready_file]
        else:
            return results

    @classmethod
    def _has_consent(cls, consent_status, authored=None, min_authored=None, max_authored=None):
        # Bounds are exclusive; a missing bound always passes.
        return (
            consent_status == QuestionnaireStatus.SUBMITTED
            and (min_authored is None or authored > min_authored)
            and (max_authored is None or authored < max_authored)
        )

    @classmethod
    def _has_primary_update_consent(cls, summary: ParticipantSummary, min_authored=None, max_authored=None):
        # A "primary update" exists for Cohort 1 participants whose latest primary
        # consent was authored on a different day than their first yes.
        if (
            (min_authored is None or summary.consentForStudyEnrollmentAuthored > min_authored)
            and (max_authored is None or summary.consentForStudyEnrollmentAuthored < max_authored)
        ):
            return (
                summary.consentCohort == ParticipantCohort.COHORT_1 and
                summary.consentForStudyEnrollmentAuthored.date() !=
                summary.consentForStudyEnrollmentFirstYesAuthored.date()
            )
        else:
            return False

    def _build_validator(self, participant_summary: ParticipantSummary) -> 'ConsentValidator':
        """Create a ConsentValidator wired to the correct file factory for this participant."""
        consent_factory = files.ConsentFileAbstractFactory.get_file_factory(
            participant_id=participant_summary.participantId,
            participant_origin=participant_summary.participantOrigin,
            storage_provider=self.storage_provider
        )
        return ConsentValidator(
            consent_factory=consent_factory,
            participant_summary=participant_summary,
            va_hpo_id=self.va_hpo_id
        )

    @classmethod
    def _organize_results(cls, results: Collection[ParsingResult]):
        """
        Organize the validation results by participant id and then
        consent type to make it easier for checking for updates for them
        """
        def new_participant_results():
            return defaultdict(lambda: [])
        organized_results = defaultdict(new_participant_results)
        for result in results:
            organized_results[result.participant_id][result.type].append(result)
        return organized_results

    @classmethod
    def _find_file_ready_for_sync(cls, results: List[ParsingResult]):
        # Return the first READY_FOR_SYNC result, or None if there isn't one.
        for result in results:
            if result.sync_status == ConsentSyncStatus.READY_FOR_SYNC:
                return result
        return None

    @classmethod
    def _find_matching_validation_result(cls, new_result: ParsingResult, previous_results: List[ParsingResult]):
        """Return the corresponding object from the list. They're matched up based on the file path."""
        for previous_result in previous_results:
            if new_result.file_path == previous_result.file_path:
                return previous_result
        return None
class ConsentValidator:
    """Validates the consent PDF files for a single participant.

    Each get_*_validation_results method loads the relevant files from the
    factory and returns one ParsingResult per file (or a single "missing file"
    result when no files exist).
    """

    def __init__(self, consent_factory: files.ConsentFileAbstractFactory,
                 participant_summary: ParticipantSummary,
                 va_hpo_id: int):
        self.factory = consent_factory
        self.participant_summary = participant_summary
        self.va_hpo_id = va_hpo_id
        # Signing dates are compared in Central time (see _get_date_from_datetime).
        self._central_time = pytz.timezone('America/Chicago')

    def get_primary_validation_results(self, expected_signing_date: datetime = None) -> List[ParsingResult]:
        """Validate primary consent files; defaults to the first-yes authored time."""
        if expected_signing_date is None:
            expected_signing_date = self.participant_summary.consentForStudyEnrollmentFirstYesAuthored
        return self._generate_validation_results(
            consent_files=self.factory.get_primary_consents(),
            consent_type=ConsentType.PRIMARY,
            additional_validation=self._validate_is_va_file,
            expected_sign_datetime=expected_signing_date
        )

    def get_ehr_validation_results(self, expected_signing_date: datetime = None) -> List[ParsingResult]:
        """Validate EHR consent files."""
        if expected_signing_date is None:
            expected_signing_date = self.participant_summary.consentForElectronicHealthRecordsAuthored
        return self._generate_validation_results(
            consent_files=self.factory.get_ehr_consents(),
            consent_type=ConsentType.EHR,
            additional_validation=self._validate_is_va_file,
            expected_sign_datetime=expected_signing_date
        )

    def get_cabor_validation_results(self, expected_signing_date: datetime = None) -> List[ParsingResult]:
        """Validate CABoR consent files (no VA-version check for this type)."""
        if expected_signing_date is None:
            expected_signing_date = self.participant_summary.consentForCABoRAuthored
        return self._generate_validation_results(
            consent_files=self.factory.get_cabor_consents(),
            consent_type=ConsentType.CABOR,
            expected_sign_datetime=expected_signing_date
        )

    def get_gror_validation_results(self, expected_signing_date: datetime = None) -> List[ParsingResult]:
        """Validate GROR consent files, additionally requiring the confirmation checkbox."""
        if expected_signing_date is None:
            expected_signing_date = self.participant_summary.consentForGenomicsRORAuthored

        def check_for_checkmark(consent: files.GrorConsentFile, result):
            # GROR consents must have the confirmation box checked.
            if not consent.is_confirmation_selected():
                result.other_errors = ConsentOtherErrors.MISSING_CONSENT_CHECK_MARK
                result.sync_status = ConsentSyncStatus.NEEDS_CORRECTING
        return self._generate_validation_results(
            consent_files=self.factory.get_gror_consents(),
            consent_type=ConsentType.GROR,
            additional_validation=check_for_checkmark,
            expected_sign_datetime=expected_signing_date
        )

    def get_primary_update_validation_results(self, expected_signing_date: datetime = None) -> List[ParsingResult]:
        """Validate primary-update consent files (agreement checkbox + VA version match)."""
        if expected_signing_date is None:
            expected_signing_date = self.participant_summary.consentForStudyEnrollmentAuthored

        def extra_primary_update_checks(consent: files.PrimaryConsentUpdateFile, result):
            # Collect all extra errors so the result reports every problem at once.
            errors_detected = []
            if not consent.is_agreement_selected():
                errors_detected.append(ConsentOtherErrors.MISSING_CONSENT_CHECK_MARK)
            va_version_error_str = self._check_for_va_version_mismatch(consent)
            if va_version_error_str:
                errors_detected.append(va_version_error_str)
            if errors_detected:
                result.other_errors = ', '.join(errors_detected)
                result.sync_status = ConsentSyncStatus.NEEDS_CORRECTING
        return self._generate_validation_results(
            consent_files=self.factory.get_primary_update_consents(
                self.participant_summary.consentForStudyEnrollmentAuthored
            ),
            consent_type=ConsentType.PRIMARY_UPDATE,
            additional_validation=extra_primary_update_checks,
            expected_sign_datetime=expected_signing_date
        )

    def _check_for_va_version_mismatch(self, consent):
        # VA participants must have the VA version of the consent, and vice versa.
        is_va_consent = consent.get_is_va_consent()
        if self.participant_summary.hpoId == self.va_hpo_id and not is_va_consent:
            return ConsentOtherErrors.NON_VETERAN_CONSENT_FOR_VETERAN
        elif self.participant_summary.hpoId != self.va_hpo_id and is_va_consent:
            return ConsentOtherErrors.VETERAN_CONSENT_FOR_NON_VETERAN
        return None

    def _validate_is_va_file(self, consent, result: ParsingResult):
        # Mark the result as needing correction if the VA/non-VA version is wrong.
        mismatch_error_str = self._check_for_va_version_mismatch(consent)
        if mismatch_error_str:
            result.other_errors = mismatch_error_str
            result.sync_status = ConsentSyncStatus.NEEDS_CORRECTING

    def _generate_validation_results(self, consent_files: List[files.ConsentFile], consent_type: ConsentType,
                                     expected_sign_datetime: datetime,
                                     additional_validation=None) -> List[ParsingResult]:
        """Build a ParsingResult per file; emit a single "missing file" result when none exist."""
        results = []
        for consent in consent_files:
            result = self._build_validation_result(consent, consent_type, expected_sign_datetime)
            if additional_validation:
                additional_validation(consent, result)
            results.append(result)
        if not results:
            # No file at all: record a placeholder so the absence is tracked.
            results.append(ParsingResult(
                participant_id=self.participant_summary.participantId,
                file_exists=False,
                type=consent_type,
                sync_status=ConsentSyncStatus.NEEDS_CORRECTING
            ))
        return results

    def _build_validation_result(self, consent: files.ConsentFile, consent_type: ConsentType,
                                 expected_sign_datetime: datetime):
        """
        Used to check generic data found on all consent types,
        additional result information should be validated for each type
        """
        result = ParsingResult(
            participant_id=self.participant_summary.participantId,
            file_exists=True,
            type=consent_type,
            file_upload_time=consent.upload_time,
            file_path=consent.file_path
        )
        self._store_signature(result=result, consent_file=consent)
        result.signing_date = consent.get_date_signed()
        result.expected_sign_date = self._get_date_from_datetime(expected_sign_datetime)
        result.is_signing_date_valid = self._is_signing_date_valid(
            signing_date=result.signing_date,
            expected_date=result.expected_sign_date
        )
        # Only files with a valid signature AND signing date are ready to sync.
        if result.is_signature_valid and result.is_signing_date_valid:
            result.sync_status = ConsentSyncStatus.READY_FOR_SYNC
        else:
            result.sync_status = ConsentSyncStatus.NEEDS_CORRECTING
        return result

    @classmethod
    def _store_signature(cls, result: ParsingResult, consent_file: files.ConsentFile):
        # Record whether a signature exists, and keep its text when it is a string.
        signature = consent_file.get_signature_on_file()
        result.is_signature_valid = bool(signature)
        if signature is True:  # True returned for when images are found
            result.is_signature_image = True
        elif signature is not None:
            # Truncate to fit the database column.
            result.signature_str = signature[:ParsingResult.signature_str.type.length]

    @classmethod
    def _is_signing_date_valid(cls, signing_date, expected_date: date):
        # Allow up to 9 days of drift between the expected and actual signing date.
        if not signing_date or not expected_date:
            return False
        else:
            days_off = (signing_date - expected_date).days
            return abs(days_off) < 10

    def _get_date_from_datetime(self, timestamp: datetime):
        # NOTE(review): assumes the stored timestamp is naive UTC — confirm.
        return timestamp.replace(tzinfo=pytz.utc).astimezone(self._central_time).date()
| |
#!/usr/bin/env python
# #########################################################################
# LugMS - Linux User/Group Management System
# ==========================================
#
# LugMS is a text-based user/group management system for GNU/linux hosts.
# ...more documentation to come.
# #########################################################################
__author__ = 'Blayne Campbell'
__date__ = '8/6/14'
__version__ = '0.7'
from datetime import datetime
import subprocess
import snack
import sys
import os
# Set Working Directory
# Ensure relative paths (settings, host lists) resolve next to this script.
abspath = os.path.realpath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# import application settings (see settings.py)
try:
    import lugms_settings
except ImportError:
    sys.exit("Settings file not found.. exiting.")
from fabfile import temp
# Get console size to help with formatting
# `stty size` prints "rows cols"; both values are kept as strings here.
r, c = os.popen('stty size', 'r').read().split()
# Create list of valid hosts for individual host validation
validhosts = list()
with open(lugms_settings.all_hosts, 'r') as f:
    for i in f:
        validhosts.append(i)
# Strip trailing newlines from each hostname.
# NOTE(review): this script is Python 2 (see the `print` statement in
# LugTask.finish) where map() returns a list; under Python 3 this would be a
# one-shot iterator and repeated `in validhosts` checks would break.
validhosts = map(lambda x: x.strip('\n'), validhosts)
class LugTask:
def __init__(self):
self.screen = snack.SnackScreen()
self.screen.drawRootText(1, 1, "Linux User & Group Management v.%s"
% __version__)
self.screen.drawRootText(int(c) - (10 + len(lugms_settings.support)),
int(r) - 2,
"Support: %s" % lugms_settings.support)
self.screen.refresh()
self.task_type = None
self.task_user = None
self.task_info = None
self.task_usermod = None
self.task_modcmt = None
self.task_grpmod = None
self.task_hosts = None
self.task_hostsind = None
self.user_comment = None
self.task_debug = None
def reset_task(self):
dic = vars(self)
skip = ['screen']
for i in dic.keys():
if i not in skip:
dic[i] = None
def exit_menu(self):
pass
def get_task(self):
self.reset_task()
i = snack.ListboxChoiceWindow(self.screen,
"Select Task",
"",
[('Add User',
('Add User', 'adduser')),
('User Info',
('User Info', 'userinfo')),
('Modify User',
('Modify User', 'moduser')),
('Delete User',
("Delete User", "deluser"))],
buttons=['Exit'], width=65)
if i and i[0] != 'exit':
self.task_type = i[1]
if task.task_type[1] == 'adduser':
task.create_user()
elif task.task_type[1] == 'userinfo':
task.user_info()
elif task.task_type[1] == 'moduser':
task.mod_user()
elif task.task_type[1] == 'deluser':
task.set_user()
self.set_hosts()
else:
self.exit_menu()
def create_user(self):
i = snack.EntryWindow(self.screen,
self.task_type[0],
"Please enter User Information",
["User ID: ",
"First Name",
"Last Name",
("Company/Department", lugms_settings.company),
("Phone Number", lugms_settings.phone),
("Accnt Type", lugms_settings.acct_type)],
buttons=['Ok', 'Back'],
width=65, entryWidth=40)
if i[0] != 'back':
self.task_user = i
cstring = str()
for v in self.task_user[1][1:]:
if v.strip():
cstring = cstring + v + " "
self.user_comment = cstring.rstrip()
self.set_hosts()
else:
self.get_task()
def user_info(self):
self.set_user()
i = snack.ListboxChoiceWindow(self.screen,
self.task_type[0],
"Select Task",
[("Check User's Group Membership",
("Check User's Group Membership",
"check_group"))],
buttons=['Ok', 'Back'], width=65)
if i[0] != 'back':
self.task_info = i
self.set_hosts()
else:
self.get_task()
def mod_user(self):
self.task_usermod = None
self.task_grpmod = None
self.task_modcmt = None
i = snack.ListboxChoiceWindow(self.screen,
self.task_type[0],
"Pick a User Modification Task",
[("Add Group Membership", "addgrp"),
("Remove Group Membership", "remgrp"),
("Modify Account Comment", "modcmt")],
buttons=['Ok', 'Back'], width=65)
if i[0] != 'back':
self.task_usermod = i
if i[1] in ['addgrp', 'remgrp']:
self.set_user()
grp = snack.EntryWindow(self.screen,
self.task_type[0],
"Provide Groups: "
"(ie: group1,group2,group3)\n",
["Groups: "],
buttons=['Ok', 'Back'],
width=65, entryWidth=40)
if grp[0] != 'back':
self.task_grpmod = grp
self.set_hosts()
else:
self.mod_user()
elif i[1] == 'modcmt':
self.set_user()
cmt = snack.EntryWindow(self.screen,
self.task_type[0],
"Update info for %s"
% self.task_user[1][0],
["First Name",
"Last Name",
("Company/Department",
lugms_settings.company),
("Phone Number",
lugms_settings.phone),
("Accnt Type",
lugms_settings.acct_type)],
buttons=['Ok', 'Back'],
width=65, entryWidth=40)
if cmt[0] != 'back':
self.task_modcmt = cmt
cstring = str()
for v in self.task_modcmt[1]:
if v.strip():
cstring = cstring + v + " "
self.user_comment = cstring.rstrip()
self.set_hosts()
else:
self.mod_user()
else:
self.get_task()
def set_user(self):
i = snack.EntryWindow(self.screen,
self.task_type[0],
"Please enter User ID",
["User ID: "],
buttons=['Ok', 'Back'],
width=65, entryWidth=20)
if i[0] != 'back':
self.task_user = i
else:
self.get_task()
def set_hosts(self, val=None):
i = snack.ListboxChoiceWindow(self.screen,
self.task_type[0],
"Select Servers",
[('Specific Servers', "hosts_ind"),
('All Linux Servers', 'hosts_all')],
buttons=['Ok', 'Back'], width=65)
if i[0] != 'back':
self.task_hosts = i
if i[1] == 'hosts_ind':
if val:
ind = snack.EntryWindow(self.screen,
self.task_type[0],
"Provide Hostnames: "
"(ie: server1,server2,server3)",
[("Hosts: ", val)],
buttons=['Ok', 'Back'],
width=65, entryWidth=40)
else:
ind = snack.EntryWindow(self.screen,
self.task_type[0],
"Provide Host Names\n"
"ie: server1,server2,server3",
["Hosts: "],
buttons=['Ok', 'Back'],
width=65, entryWidth=40)
if ind[0] != 'back' and len(ind[1][0].split(',')) >= 1:
taskhosts = str()
invalhosts = str()
hostlist = ind[1][0].split(',')
hostlist = list(set(hostlist))
hostlist.sort()
for host in hostlist:
if host in validhosts:
taskhosts = taskhosts + (host + ',')
else:
invalhosts = invalhosts + (host + ',')
taskhosts = taskhosts.rstrip(",")
invalhosts = invalhosts.rstrip(",")
if taskhosts == '':
snack.ButtonChoiceWindow(self.screen,
self.task_type[0],
"No Valid Hostnames Provided",
buttons=['Ok'], width=65)
self.set_hosts()
elif len(invalhosts) > 1:
snack.ButtonChoiceWindow(self.screen,
self.task_type[0],
"Valid hostnames: %s\n"
"\nInvalid hostnames: %s\n"
"\nPlease re-verify hosts.."
% (taskhosts, invalhosts),
buttons=['Verify Hosts'],
width=65)
self.set_hosts(taskhosts)
else:
self.task_hostsind = ind
else:
self.set_hosts()
else:
self.get_task()
def confirm(self):
if self.task_type:
tasktype = self.task_type[0]
taskusers = str()
userlist = self.task_user[1][0]
taskhosts = str()
for user in userlist.split(","):
taskusers += taskusers + (user + "\n")
if self.task_hosts[1] == 'hosts_all':
taskhosts = 'All Servers'
else:
hostlist = self.task_hostsind[1][0].split(',')
hostlist = list(set(hostlist))
hostlist.sort()
for host in hostlist:
taskhosts = taskhosts + (host + "\n\t")
if self.task_type[1] == 'adduser' or \
(self.task_type[1] == 'moduser' and self.user_comment):
i = snack.ButtonChoiceWindow(self.screen,
"%s" % tasktype,
"User: %s\n"
"Comment: %s\n\n"
"Hosts: %s"
% (taskusers, self.user_comment,
taskhosts),
buttons=['Ok', 'Cancel'],
width=65)
elif self.task_type[1] == 'userinfo':
i = snack.ButtonChoiceWindow(self.screen,
"%s" % tasktype,
"Checking Group Membership "
"for:\n\nUser: %s\nHosts: %s"
% (taskusers, taskhosts),
buttons=['Ok', 'Cancel'],
width=65)
elif self.task_type[1] == 'moduser' and \
self.task_usermod[1] == 'addgrp':
taskgroups = self.task_grpmod[1][0]
i = snack.ButtonChoiceWindow(self.screen,
"%s" % tasktype,
"Adding Group Membership\n\n"
"User: %s"
"Groups: %s\n"
"Hosts: %s"
% (taskusers,
taskgroups, taskhosts),
buttons=['Ok', 'Cancel'],
width=65)
elif self.task_type[1] == 'moduser' and \
self.task_usermod[1] == 'remgrp':
taskgroups = self.task_grpmod[1][0]
i = snack.ButtonChoiceWindow(self.screen,
"%s" % tasktype,
"Removing Group Membership\n\n"
"User: %s"
"Groups: %s\n"
"Hosts: %s"
% (taskusers,
taskgroups, taskhosts),
buttons=['Ok', 'Cancel'],
width=65)
elif self.task_type[1] == 'deluser':
i = snack.ButtonChoiceWindow(self.screen,
"%s" % tasktype,
"WARNING: DELETING USER:\n\n"
"User: %s\nHosts: %s"
% (taskusers, taskhosts),
buttons=['Ok', 'Cancel'],
width=65)
if i != 'cancel':
self.execute()
else:
self.exit_menu()
else:
self.exit_menu()
def execute(self):
self.screen.finish()
print('\nExecuted on: %s'
% datetime.now().strftime('%Y-%m-%d/%H:%M:%S'))
if self.task_hosts[1] == 'hosts_all':
hosts = 'hosts_all'
else:
hosts = '-H %s' % self.task_hostsind[1][0]
if self.task_type[1] == 'adduser':
user = self.task_user[1][0]
subprocess.call(['fab', '%s' % hosts, 'generatepw:user=%s' % user,
'adduser:user=%s,comment=%s'
% (user, self.user_comment)])
if self.task_type[1] == 'userinfo':
user = self.task_user[1][0]
subprocess.call(['fab', '%s' % hosts,
'check_group:user=%s' % user])
if self.task_type[1] == 'moduser':
if self.task_usermod[1] == 'modcmt':
user = self.task_user[1][0]
cstring = str()
for i in self.task_modcmt[1]:
if i.strip():
cstring = cstring + i + " "
cstring = cstring.rstrip()
subprocess.call(['fab', '%s' % hosts,
'mod_comment:user=%s,comment=%s'
% (user, cstring)])
if self.task_usermod[1] == 'addgrp':
userlist = self.task_user[1][0]
groups = self.task_grpmod[1][0].split(',')
for user in userlist.split(","):
for group in groups:
subprocess.call(['fab', '%s' % hosts,
'addgrp:user=%s,group=%s'
% (user, group)])
if self.task_usermod[1] == 'remgrp':
userlist = self.task_user[1][0]
groups = self.task_grpmod[1][0].split(',')
for user in userlist.split(","):
for group in groups:
subprocess.call(['fab', '%s' % hosts,
'remgrp:user=%s,group=%s'
% (user, group)])
if self.task_type[1] == 'deluser':
userlist = self.task_user[1][0]
for user in userlist.split(","):
subprocess.call(['fab', '%s' % hosts,
'deluser:user=%s' % user])
def finish(self):
self.screen.finish()
if lugms_settings.DEBUG:
attrs = vars(task)
print ''.join("%s: %s\n" % item for item in attrs.items())
if __name__ == '__main__':
    task = LugTask()
    try:
        task.get_task()
        task.confirm()
    except Exception as e:
        # Restore the terminal before reporting the failure.
        task.finish()
        sys.exit("Exit with exception: %s" % e)
    task.finish()
    # Remove temporary artifacts created by the fabric tasks.
    temp.cleanup(ddate=datetime.now())
    print('\nDone.')
| |
#!/usr/bin/env python
# Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import idautils
import idaapi
import idc
import sys
import os
import argparse
import struct
import traceback
import collections
import itertools
import pprint
# Bring in utility libraries.
from util import *
from table import *
from flow import *
from refs import *
from segment import *
# Canonical-feature bit for "operand N is written" (idaapi CF_CHGn flags),
# keyed by operand index.
OPND_WRITE_FLAGS = {
  0: idaapi.CF_CHG1,
  1: idaapi.CF_CHG2,
  2: idaapi.CF_CHG3,
  3: idaapi.CF_CHG4,
  4: idaapi.CF_CHG5,
  5: idaapi.CF_CHG6,
}

# Canonical-feature bit for "operand N is read" (idaapi CF_USEn flags),
# keyed by operand index.
OPND_READ_FLAGS = {
  0: idaapi.CF_USE1,
  1: idaapi.CF_USE2,
  2: idaapi.CF_USE3,
  3: idaapi.CF_USE4,
  4: idaapi.CF_USE5,
  5: idaapi.CF_USE6,
}

# Human-readable name for each IDA operand dtype code (op_t.dtyp).
OPND_DTYPE_STR = {
  0:'dt_byte',
  1:'dt_word',
  2:'dt_dword',
  3:'dt_float',
  4:'dt_double',
  5:'dt_tbyte',
  6:'dt_packreal',
  7:'dt_qword',
  8:'dt_byte16',
  9:'dt_code',
  10:'dt_void',
  11:'dt_fword',
  12:'dt_bitfild',
  13:'dt_string',
  14:'dt_unicode',
  15:'dt_3byte',
  16:'dt_ldbl',
  17:'dt_byte32',
  18:'dt_byte64'
}

# Operand dtype code -> size in bytes. Types without a fixed byte size
# (code, void, strings, bitfields, ...) are deliberately absent; lookups
# fall back to 0 via .get().
OPND_DTYPE_TO_SIZE = {
  idaapi.dt_byte: 1,
  idaapi.dt_word: 2,
  idaapi.dt_dword: 4,
  idaapi.dt_float: 4,
  idaapi.dt_double: 8,
  idaapi.dt_qword: 8,
  idaapi.dt_byte16: 16,
  idaapi.dt_fword: 6,
  idaapi.dt_3byte: 3,
  idaapi.dt_byte32: 32,
  idaapi.dt_byte64: 64,
}
def get_native_size():
  """Return the native pointer width in bytes for the current database (8, 4, or 2)."""
  info = idaapi.get_inf_structure()
  if info.is_64bit():
    return 8
  # 16-bit is the fallback when the database is neither 64- nor 32-bit.
  return 4 if info.is_32bit() else 2
def get_register_name(reg_id, size=None):
  """Resolve an IDA register id to its name, defaulting to the native width."""
  effective_size = get_native_size() if size is None else size
  return idaapi.get_reg_name(reg_id, effective_size)
def get_register_info(reg_name):
  """Parse a register name into an idaapi.reg_info_t.

  NOTE(review): parse_reg_name's success flag was previously bound to an
  unused local and ignored; on failure the returned reg_info_t is simply
  left in its default state. That behavior is preserved (callers always
  receive a reg_info_t), but the dead assignment is removed.
  """
  ri = idaapi.reg_info_t()
  idaapi.parse_reg_name(reg_name, ri)
  return ri
class Operand(object):
  """Wrapper around an IDA op_t that decodes x86/amd64 addressing details.

  For phrase/displacement operands ([base + index*scale + displ]) the SIB
  information packed into op_t.specflag1/specflag2 is decoded into base
  register id, index register id, scale, and displacement.
  """

  def __init__(self, opnd, ea, insn, write, read):
    self._operand = opnd
    self._ea = ea
    self._read = read
    self._write = write
    self._insn = insn
    self._type = opnd.type
    self._index_id = None
    self._base_id = None
    self._displ = None
    self._scale = None
    if self._type in (idaapi.o_displ, idaapi.o_phrase):
      specflag1 = self.op_t.specflag1
      specflag2 = self.op_t.specflag2
      scale = 1 << ((specflag2 & 0xC0) >> 6)  # SIB scale: top two bits
      offset = self.op_t.addr
      # BUGFIX: `index`/`base_` were previously unbound when specflag1 was
      # neither 0 nor 1, raising NameError below; default both to None.
      index = None
      base_ = None
      if specflag1 == 0:
        # No SIB byte: op_t.reg is the base register.
        index = None
        base_ = self.op_t.reg
      elif specflag1 == 1:
        # SIB byte present: decode index and base fields.
        index = (specflag2 & 0x38) >> 3
        base_ = (specflag2 & 0x07) >> 0
        if self.op_t.reg == 0xC:
          # REX extension bits: promote to the r8..r15 range.
          if base_ & 4:
            base_ += 8
          if index & 4:
            index += 8
      self._scale = scale
      self._index_id = index
      self._base_id = base_
      self._displ = offset

  def _get_datatype_size(self, dtype):
    # Byte size for an IDA dt_* code; 0 when the type has no fixed size.
    return OPND_DTYPE_TO_SIZE.get(dtype, 0)

  def _get_datatypestr_from_dtyp(self, dt_dtyp):
    # Human-readable name for an IDA dt_* code; "" when unknown.
    return OPND_DTYPE_STR.get(dt_dtyp, "")

  @property
  def op_t(self):
    """The underlying IDA op_t structure."""
    return self._operand

  @property
  def value(self):
    return idc.GetOperandValue(self._ea, self.index)

  @property
  def size(self):
    """Operand size in bytes (0 when unknown)."""
    return self._get_datatype_size(self._operand.dtyp)

  @property
  def text(self):
    """Operand as rendered by IDA's disassembly."""
    return idc.GetOpnd(self._ea, self.index)

  @property
  def dtype(self):
    return self._get_datatypestr_from_dtyp(self._operand.dtyp)

  @property
  def index(self):
    """Position of this operand within the instruction (op_t.n)."""
    return self._operand.n

  @property
  def type(self):
    return self._type

  @property
  def is_read(self):
    return self._read

  @property
  def is_write(self):
    return self._write

  @property
  def is_void(self):
    return self._type == idaapi.o_void

  @property
  def is_reg(self):
    return self._type == idaapi.o_reg

  @property
  def is_mem(self):
    return self._type == idaapi.o_mem

  @property
  def is_phrase(self):
    return self._type == idaapi.o_phrase

  @property
  def is_displ(self):
    return self._type == idaapi.o_displ

  @property
  def is_imm(self):
    return self._type == idaapi.o_imm

  @property
  def is_far(self):
    return self._type == idaapi.o_far

  @property
  def is_near(self):
    return self._type == idaapi.o_near

  @property
  def is_special(self):
    return self._type >= idaapi.o_idpspec0

  @property
  def has_phrase(self):
    return self._type in (idaapi.o_phrase, idaapi.o_displ)

  @property
  def reg_id(self):
    """ID of the register used in the operand."""
    return self._operand.reg

  @property
  def reg(self):
    """Name of the register used in the operand."""
    if self.has_phrase:
      size = get_native_size()
      return get_register_name(self.reg_id, size)
    if self.is_reg:
      return get_register_name(self.reg_id, self.size)

  @property
  def regs(self):
    """Set of register names referenced by this operand (possibly empty)."""
    if self.has_phrase:
      # BUGFIX: previously referenced the nonexistent attribute `self.base`
      # (AttributeError) and the ambiguous `self.index` (the operand number,
      # not a register). Use the decoded base/index register names.
      return set(reg for reg in (self.base_reg, self.index_reg) if reg)
    elif self.is_reg:
      return {get_register_name(self.reg_id, self.size)}
    else:
      return set()

  @property
  def base_reg(self):
    """Name of the base register of a phrase/displacement operand, or None."""
    if self._base_id is None:
      return None
    return get_register_name(self._base_id)

  @property
  def index_reg(self):
    """Name of the index register of a phrase/displacement operand, or None."""
    if self._index_id is None:
      return None
    return get_register_name(self._index_id)

  @property
  def scale(self):
    """Index scale factor (1/2/4/8), or None for non-memory operands."""
    return self._scale

  @property
  def displ(self):
    """Displacement constant, or None for non-memory operands."""
    return self._displ
class Instruction(object):
  '''
  Instruction objects

  Wraps a decoded IDA instruction and exposes its operands with
  read/write information derived from the canonical feature flags.
  '''

  def __init__(self, ea):
    self._ea = ea
    self._insn, _ = decode_instruction(ea)
    self._operands = self._make_operands()

  def _is_operand_write_to(self, index):
    # CF_CHGn feature bit set => operand `index` is written by the insn.
    return (self.feature & OPND_WRITE_FLAGS[index])

  def _is_operand_read_from(self, index):
    # CF_USEn feature bit set => operand `index` is read by the insn.
    return (self.feature & OPND_READ_FLAGS[index])

  def _make_operands(self):
    operands = []
    for index, opnd in enumerate(self._insn.Operands):
      if opnd.type == idaapi.o_void:
        break  # o_void marks the end of the operand list.
      operands.append(Operand(opnd,
                              self._ea,
                              insn=self._insn,
                              write=self._is_operand_write_to(index),
                              read=self._is_operand_read_from(index)))
    return operands

  @property
  def feature(self):
    """Canonical feature bitmask of the instruction."""
    return self._insn.get_canon_feature()

  @property
  def operands(self):
    """List of Operand objects for this instruction."""
    return self._operands

  # Backward-compatible alias: the original property name was misspelled
  # ("opearnds"); it is kept so existing callers continue to work.
  @property
  def opearnds(self):
    return self._operands

  @property
  def mnemonic(self):
    """Canonical mnemonic string (e.g. 'mov')."""
    return self._insn.get_canon_mnem()
def _signed_from_unsigned64(val):
if val & 0x8000000000000000:
return -0x10000000000000000 + val
return val
def _signed_from_unsigned32(val):
if val & 0x80000000:
return -0x100000000 + val
return val
def _mark_function_args_ms64(referers, dereferences, func_var_data):
    """Mark the Microsoft x64 integer argument registers as function args."""
    for arg_reg in ("rcx", "rdx", "r8", "r9"):
        _mark_func_arg(arg_reg, referers, dereferences, func_var_data)
def _mark_function_args_sysv64(referers, dereferences, func_var_data):
    """Mark the SysV AMD64 integer argument registers as function args."""
    for arg_reg in ("rdi", "rsi", "rdx", "rcx", "r8", "r9"):
        _mark_func_arg(arg_reg, referers, dereferences, func_var_data)
def _mark_function_args_x86(referers, dereferences, func_var_data):
    # TODO: 32-bit x86 conventions pass arguments on the stack, not in
    # registers, so there is nothing to mark here yet; stack-slot argument
    # marking is unimplemented.
    pass #TODO. urgh.
def _translate_reg_32(reg):
    # On 32-bit targets register names are already canonical; identity map
    # (mirrors the interface of _translate_reg_64).
    return reg
def _translate_reg_64(reg):
return {"edi":"rdi",
"esi":"rsi",
"eax":"rax",
"ebx":"rbx",
"ecx":"rcx",
"edx":"rdx",
"ebp":"rbp",
"esp":"rsp"}.get(reg, reg)
# Select pointer-width helpers and ABI register names once, at import time,
# based on the bitness of the database currently loaded in IDA.
if idaapi.get_inf_structure().is_64bit():
    _signed_from_unsigned = _signed_from_unsigned64
    _base_ptr = "rbp"
    _stack_ptr = "rsp"
    # Caller-saved (volatile) registers under the SysV AMD64 ABI.
    _trashed_regs = ["rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11"]
    # NOTE(review): assumes the SysV calling convention for all 64-bit
    # binaries; Windows binaries would need _mark_function_args_ms64 — confirm.
    _mark_args = _mark_function_args_sysv64
    _translate_reg = _translate_reg_64
elif idaapi.get_inf_structure().is_32bit():
    _signed_from_unsigned = _signed_from_unsigned32
    _base_ptr = "ebp"
    _stack_ptr = "esp"
    _trashed_regs = ["eax", "ecx", "edx"]
    _mark_args = _mark_function_args_x86
    _translate_reg = _translate_reg_32
# NOTE(review): no else branch — for a 16-bit database the names above are
# never bound and the next line raises NameError at import time.
_base_ptr_format = "[{}+".format(_base_ptr)
_stack_ptr_format = "[{}+".format(_stack_ptr)
def floor_key(d, key):
    """Return the largest key in `d` that is <= `key`, or None when no
    such key exists (works on any iterable of comparable keys)."""
    best = None
    for candidate in d:
        if candidate <= key and (best is None or candidate > best):
            best = candidate
    return best
def _get_flags_from_bits(flag):
'''
Translates the flag field in structures (and elsewhere?) into a human readable
string that is compatible with pasting into IDA or something.
Returns an empty string if supplied with -1.
'''
if -1 == flag:
return ""
cls = {
'MASK':1536,
1536:'FF_CODE',
1024:'FF_DATA',
512:'FF_TAIL',
0:'FF_UNK',
}
comm = {
'MASK':1046528,
2048:'FF_COMM',
4096:'FF_REF',
8192:'FF_LINE',
16384:'FF_NAME',
32768:'FF_LABL',
65536:'FF_FLOW',
524288:'FF_VAR',
49152:'FF_ANYNAME',
}
_0type = {
'MASK':15728640,
1048576:'FF_0NUMH',
2097152:'FF_0NUMD',
3145728:'FF_0CHAR',
4194304:'FF_0SEG',
5242880:'FF_0OFF',
6291456:'FF_0NUMB',
7340032:'FF_0NUMO',
8388608:'FF_0ENUM',
9437184:'FF_0FOP',
10485760:'FF_0STRO',
11534336:'FF_0STK',
}
_1type = {
'MASK':251658240,
16777216:'FF_1NUMH',
33554432:'FF_1NUMD',
50331648:'FF_1CHAR',
67108864:'FF_1SEG',
83886080:'FF_1OFF',
100663296:'FF_1NUMB',
117440512:'FF_1NUMO',
134217728:'FF_1ENUM',
150994944:'FF_1FOP',
167772160:'FF_1STRO',
184549376:'FF_1STK',
}
datatype = {
'MASK':4026531840,
0:'FF_BYTE',
268435456:'FF_WORD',
536870912:'FF_DWRD',
805306368:'FF_QWRD',
1073741824:'FF_TBYT',
1342177280:'FF_ASCI',
1610612736:'FF_STRU',
1879048192:'FF_OWRD',
2147483648:'FF_FLOAT',
2415919104:'FF_DOUBLE',
2684354560:'FF_PACKREAL',
2952790016:'FF_ALIGN',
}
flags = set()
flags.add(cls[cls['MASK']&flag])
for category in [comm, _0type, _1type, datatype]:
#the ida docs define, for example, a FF_0VOID = 0 constant in with the rest
# of the 0type constants, but I _think_ that just means
# the field is unused, rather than being specific data
val = category.get(category['MASK']&flag, None)
if val:
flags.add(val)
return flags
def _process_instruction(inst_ea, func_variable):
    # Record how the instruction at `inst_ea` touches the recovered stack
    # variables in func_variable["stack_vars"] (a dict keyed by frame offset,
    # each value holding name/size/writes/reads/referent/safe).
    insn = Instruction(inst_ea)
    for opnd in insn.opearnds:
        if opnd.has_phrase:
            # Register-based memory operand: resolve base/index to their
            # canonical (64-bit) names and take the signed displacement.
            base_ = _translate_reg(opnd.base_reg) if opnd.base_reg else None
            index_ = _translate_reg(opnd.index_reg) if opnd.index_reg else None
            offset = _signed_from_unsigned(idc.GetOperandValue(inst_ea, opnd.index))
            # Nothing left to track for this function.
            if len(func_variable["stack_vars"].keys()) == 0:
                return
            if opnd.is_write:
                target_on_stack = base_ if base_ == _stack_ptr or base_ == _base_ptr else None
                if target_on_stack == _base_ptr:
                    # Find the variable whose slot contains this offset.
                    # NOTE(review): `if start_:` treats a legitimate offset
                    # of 0 as "not found" — confirm offsets are always
                    # nonzero here.
                    start_ = floor_key(func_variable["stack_vars"].keys(), offset)
                    if start_:
                        end_ = start_ + func_variable["stack_vars"][start_]["size"]
                        if offset in range(start_, end_):
                            var_offset = offset - start_
                            func_variable["stack_vars"][start_]["writes"].append({"ea" :inst_ea, "offset" :var_offset})
                            func_variable["stack_vars"][start_]["safe"] = True
                else:
                    # Write not through the base pointer: if the operand text
                    # names a tracked variable, that variable can no longer be
                    # trusted — mark unsafe and stop tracking it.
                    # (py2 .keys() returns a list, so popping inside the loop
                    # is safe; the break exits right after the pop anyway.)
                    for key in func_variable["stack_vars"].keys():
                        if func_variable["stack_vars"][key]["name"] in opnd.text:
                            func_variable["stack_vars"][key]["safe"] = False
                            func_variable["stack_vars"].pop(key, None)
                            break
            elif opnd.is_read:
                # Same matching logic as the write case, recorded as a read.
                read_on_stack = base_ if base_ == _stack_ptr or base_ == _base_ptr else None
                if read_on_stack == _base_ptr:
                    start_ = floor_key(func_variable["stack_vars"].keys(), offset)
                    if start_:
                        end_ = start_ + func_variable["stack_vars"][start_]["size"]
                        if offset in range(start_, end_):
                            var_offset = offset - start_
                            func_variable["stack_vars"][start_]["reads"].append({"ea" :inst_ea, "offset" :var_offset})
                            func_variable["stack_vars"][start_]["safe"] = True
                else:
                    for key in func_variable["stack_vars"].keys():
                        if func_variable["stack_vars"][key]["name"] in opnd.text:
                            func_variable["stack_vars"][key]["safe"] = False
                            func_variable["stack_vars"].pop(key, None)
                            break
            else:
                # Memory operand that is neither read nor written —
                # presumably an address-taken case (e.g. lea); record the
                # instruction as a referent of the variable.
                read_on_stack = base_ if base_ == _stack_ptr or base_ == _base_ptr else None
                if read_on_stack:
                    start_ = floor_key(func_variable["stack_vars"].keys(), offset)
                    if start_:
                        end_ = start_ + func_variable["stack_vars"][start_]["size"]
                        if offset in range(start_, end_):
                            var_offset = offset - start_
                            # NOTE(review): "flags" holds the set returned by
                            # _get_flags_from_bits; the unsafe-function path in
                            # build_stack_variable stores "" instead, which
                            # would break this .add — confirm unreachable.
                            func_variable["stack_vars"][start_]["flags"].add("LOCAL_REFERER")
                            func_variable["stack_vars"][start_]["referent"].append({"ea" :inst_ea, "offset" :var_offset})
        elif opnd.is_reg and opnd.is_read:
            # Plain register read of the base pointer itself.
            if insn.mnemonic in ["push"]:
                continue
            # The register operand such as `add %rax %rbp` will not have the offset value
            # It is set as 0 since we are looking to replace %rbp with %frame = ...
            offset = 0 #_signed_from_unsigned(idc.GetOperandValue(inst_ea, opnd.index))
            if len(func_variable["stack_vars"].keys()) == 0:
                return
            for reg in opnd.regs:
                if _translate_reg(reg) == _base_ptr:
                    start_ = floor_key(func_variable["stack_vars"].keys(), offset)
                    if start_:
                        end_ = start_ + func_variable["stack_vars"][start_]["size"]
                        # NOTE(review): end_+1 makes this range inclusive,
                        # unlike the exclusive ranges above — confirm intended.
                        if offset in range(start_, end_+1):
                            var_offset = offset - start_
                            func_variable["stack_vars"][start_]["reads"].append({"ea" :inst_ea, "offset" : var_offset})
                            func_variable["stack_vars"][start_]["safe"] = True
def _process_basic_block(f_ea, block_ea, func_variable):
    """Run stack-variable tracking over every instruction in one basic block."""
    instructions, _ = analyse_block(f_ea, block_ea, True)
    for ea in instructions:
        _process_instruction(ea, func_variable)
# Symbol names of functions whose frames cannot be trusted (indirect
# base-pointer use or variadic signature); build_stack_variable collapses
# their frame into a single opaque variable instead of walking members.
_FUNC_UNSAFE_LIST = set()
def build_stack_variable(func_ea):
    # Walk the IDA frame structure of the function at `func_ea` and build a
    # dict mapping frame offset (relative to the saved-regs member) to a
    # record: name, size, flags, and empty writes/referent/reads lists plus
    # a `safe` flag that later analysis may set.
    stack_vars = dict()
    frame = idc.GetFrame(func_ea)
    if not frame:
        return stack_vars
    f_name = get_symbol_name(func_ea)
    #grab the offset of the stored frame pointer, so that
    #we can correlate offsets correctly in referent code
    # e.g., EBP+(-0x4) will match up to the -0x4 offset
    delta = idc.GetMemberOffset(frame, " s")
    if delta == -1:
        delta = 0
    if f_name not in _FUNC_UNSAFE_LIST:
        # Safe function: enumerate each named frame member below the saved
        # registers and record it as an individual variable.
        offset = idc.GetFirstMember(frame)
        while -1 != _signed_from_unsigned(offset):
            member_name = idc.GetMemberName(frame, offset)
            if member_name is None:
                # Gap in the frame structure; skip to the next member.
                offset = idc.GetStrucNextOff(frame, offset)
                continue
            if (member_name == " r" or member_name == " s"):
                # IDA's special members (return address / saved registers).
                offset = idc.GetStrucNextOff(frame, offset)
                continue
            member_size = idc.GetMemberSize(frame, offset)
            if offset >= delta:
                # At or above the saved frame pointer: not a local variable.
                offset = idc.GetStrucNextOff(frame, offset)
                continue
            member_flag = idc.GetMemberFlag(frame, offset)
            flag_str = _get_flags_from_bits(member_flag)
            # Offsets are stored relative to the saved-regs slot, so locals
            # come out negative (e.g. EBP-4 -> -4).
            member_offset = offset-delta
            stack_vars[member_offset] = {"name": member_name,
                                         "size": member_size,
                                         "flags": flag_str,
                                         "writes": list(),
                                         "referent": list(),
                                         "reads": list(),
                                         "safe": False }
            offset = idc.GetStrucNextOff(frame, offset)
    else:
        # Unsafe function: collapse the whole frame into one variable named
        # after the function itself.
        # NOTE(review): "flags" is set to "" here but to a set above —
        # inconsistent types for downstream consumers.
        offset = idc.GetFirstMember(frame)
        frame_size = idc.GetFunctionAttr(func_ea, idc.FUNCATTR_FRSIZE)
        flag_str = ""
        member_offset = _signed_from_unsigned(offset) - delta
        stack_vars[member_offset] = {"name": f_name,
                                     "size": frame_size,
                                     "flags": flag_str,
                                     "writes": list(),
                                     "referent": list(),
                                     "reads": list(),
                                     "safe": False }
    return stack_vars
def is_instruction_unsafe(inst_ea, func_ea):
    """ Returns `True` if the instruction reads from the base ptr and loads
        the value to the other registers.
    """
    insn = Instruction(inst_ea)
    # Special case: the prologue pattern (push rbp / mov rbp, rsp / ...)
    # legitimately touches the base pointer, so `push` never counts.
    if insn.mnemonic in ["push"]:
        return False
    return any(_translate_reg(reg) == _base_ptr
               for opnd in insn.opearnds
               if opnd.is_read and opnd.is_reg
               for reg in opnd.regs)
def is_function_unsafe(func_ea, blockset):
    """ Returns `True` if the function uses bp and it might access the stack
        variable indirectly using the base pointer.
    """
    # Functions without a standard frame never qualify.
    if not (idc.GetFunctionFlags(func_ea) & idc.FUNC_FRAME):
        return False
    for block_ea in blockset:
        instructions, _ = analyse_block(func_ea, block_ea, True)
        for ea in instructions:
            if is_instruction_unsafe(ea, func_ea):
                return True
    return False
def collect_function_vars(func_ea, blockset):
    """Collect the stack variables of the function at `func_ea`, then walk
    every basic block in `blockset` recording reads/writes/referents.

    Side effect: adds the function's symbol name to _FUNC_UNSAFE_LIST when
    the frame is used indirectly or the function is variadic.
    """
    DEBUG_PUSH()
    if is_function_unsafe(func_ea, blockset):
        _FUNC_UNSAFE_LIST.add(get_symbol_name(func_ea))
    # Check for the variadic function type; Add the variadic function
    # to the list of unsafe functions.
    # Fix: also require ')' before slicing — a malformed type string with
    # '(' but no ')' used to raise ValueError from rindex.
    func_type = idc.GetType(func_ea)
    if (func_type is not None) and ("(" in func_type) and (")" in func_type):
        args = func_type[ func_type.index('(')+1: func_type.rindex(')') ]
        args_list = [ x.strip() for x in args.split(',')]
        if "..." in args_list:
            _FUNC_UNSAFE_LIST.add(get_symbol_name(func_ea))
    stack_vars = build_stack_variable(func_ea)
    processed_blocks = set()
    # Consume blockset destructively, guarding against duplicates.
    while len(blockset) > 0:
        block_ea = blockset.pop()
        if block_ea in processed_blocks:
            DEBUG("ERROR: Attempting to add same block twice: {0:x}".format(block_ea))
            continue
        processed_blocks.add(block_ea)
        _process_basic_block(func_ea, block_ea, {"stack_vars": stack_vars})
    DEBUG_POP()
    return stack_vars
def recover_variables(F, func_ea, blockset):
    """ Recover the stack variables from the function. It also collect
        the instructions referring to the stack variables.

        Results are appended to F.stack_vars (presumably a protobuf
        Function message — confirm against the caller); only variables
        whose "safe" flag was set during analysis are emitted.
    """
    # Checks for the stack frame; return if it is None
    if not is_code_by_flags(func_ea) or \
        not idc.GetFrame(func_ea):
        return
    # NOTE(review): `functions` is built but never consumed — dead code?
    functions = list()
    f_name = get_symbol_name(func_ea)
    f_ea = idc.GetFunctionAttr(func_ea, idc.FUNCATTR_START)
    f_vars = collect_function_vars(func_ea, blockset)
    functions.append({"ea":f_ea, "name":f_name, "stackArgs":f_vars})
    for offset in f_vars.keys():
        # Skip variables the analysis could not prove safe to recover.
        if f_vars[offset]["safe"] is False:
            continue
        var = F.stack_vars.add()
        var.sp_offset = offset
        var.name = f_vars[offset]["name"]
        var.size = f_vars[offset]["size"]
        # Emit every recorded write and read as a reference entry.
        for i in f_vars[offset]["writes"]:
            r = var.ref_eas.add()
            r.inst_ea = i["ea"]
            r.offset = i["offset"]
        for i in f_vars[offset]["reads"]:
            r = var.ref_eas.add()
            r.inst_ea = i["ea"]
            r.offset = i["offset"]
| |
# -*- coding: utf-8 -*-
"""
xtr
~~~~~~
    An auto-updated personal website powered by
flask(Python), bootstrap3, sqlite3, webhook.
"""
import hashlib
import hmac
import os
import urllib2
from sqlite3 import dbapi2 as sqlite3

import mistune
from flask import (Flask, abort, g, redirect, render_template, request,
                   url_for)
# Configuration — loaded into app.config via from_object(__name__) below,
# then overridden by config.py (which supplies e.g. SECRET_TOKEN).
DEBUG = False
DATABASE = 'xtr.db'
SECRET_KEY = 'development key'  # dev placeholder; presumably overridden by config.py
RAW_URL = 'https://raw.githubusercontent.com/MrGba2z/Xtr/master/'  # raw markdown fetch base
REPO_URL = 'https://github.com/MrGba2z/Xtr/blob/master/'  # human-readable file link base
EMAIL = 'contactxt@icloud.com'
ALLOWED_EXTENSIONS = set(['md', 'markdown', 'mkd'])  # recognized markdown extensions
# Create application
app = Flask(__name__)
app.config.from_object(__name__)  # load the uppercase defaults above
app.config.from_pyfile('config.py')  # override from config.py (SECRET_TOKEN, REVISION, ...)
def connect_db():
    """Open a connection to the configured SQLite database with Row access."""
    conn = sqlite3.connect(app.config['DATABASE'])
    conn.row_factory = sqlite3.Row  # rows behave like dicts/tuples
    return conn
# Initialize a database
# $sqlite3 xtr.db < schema.sql
def init_db():
    """Create all tables by executing schema.sql against the database."""
    database = get_db()
    with app.open_resource('schema.sql', mode='r') as schema_file:
        database.cursor().executescript(schema_file.read())
    database.commit()
def get_db():
    """Return the request-scoped SQLite connection, creating it on first use."""
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = g.sqlite_db = connect_db()
    return db
def query_db(query, args=(), one=False):
    """Run a SELECT and return all rows; with one=True return only the
    first row, or None when the result is empty."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
@app.teardown_appcontext
def close_db(error):
    """Close the per-request database connection, if one was opened."""
    db = getattr(g, 'sqlite_db', None)
    if db is not None:
        db.close()
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/about')
def about():
    """Render the about page."""
    return render_template('about.html')
@app.route('/lab')
def lab():
    """Render the lab page."""
    return render_template('lab.html')
@app.route('/article')
@app.route('/article/<path:title>')
def article(title=None):
    """Show one article by title, or every article newest-first.

    Fix: removed a stray Python-2 `print title` debug statement that wrote
    to stdout on every titled request (and is a SyntaxError under py3).
    """
    db = get_db()
    if title:
        cur = db.execute('select * from article where title = ?', [title])
    else:
        cur = db.execute('select title, content from article order by id desc')
    articles = cur.fetchall()
    return render_template('article.html', box=articles)
@app.route('/note')
def note():
    """Render all notes grouped by tag.

    Rows come back ordered by tag, so grouping consecutive rows with equal
    tags yields one group per tag. Fixes: an empty table previously
    produced box=[[]] instead of box=[], and the first row was detected via
    a fragile identity comparison with data[0].
    """
    db = get_db()
    cur = db.execute('select title, tag, content, id from note order by tag')
    data = cur.fetchall()
    # data rows: (title, tag, content, id)
    notes = []
    current = []
    current_tag = None
    for row in data:
        if current and row[1] == current_tag:
            current.append(row)
        else:
            if current:
                notes.append(current)
            current = [row]
            current_tag = row[1]
    if current:
        notes.append(current)
    return render_template('note.html', box=notes)
@app.route('/url')
def url():
    """Redirect /url requests to the landing page.

    Fix: `url_for` was referenced without being imported, so every request
    to this route raised NameError; it is now imported at the top of the
    file.
    """
    return redirect(url_for('index'))
@app.route('/api', methods=['POST'])
def api():
    """GitHub webhook endpoint: verify the request signature, then apply
    added/removed/modified markdown files and bump the site revision.

    Fixes: a missing or malformed X-Hub-Signature header used to raise
    TypeError on sha1[5:] (HTTP 500); the digest comparison now uses
    hmac.compare_digest to avoid timing side channels.
    """
    sha1 = request.headers.get('X-Hub-Signature')  # format: "sha1=<hexdigest>"
    if not sha1 or not sha1.startswith('sha1='):
        return 'Unauthorized!'
    body = request.data
    hash_maker = hmac.new(app.config['SECRET_TOKEN'], '', hashlib.sha1)
    hash_maker.update(body)
    # Constant-time comparison of our HMAC against the header's digest.
    if not hmac.compare_digest(hash_maker.hexdigest(), sha1[5:]):
        return 'Unauthorized!'
    payload = request.get_json()
    commits = payload['commits']
    version = payload['after'][:7]  # short hash of the head commit
    resp = 'There is %d commits!\n' % len(commits)
    for commit in commits:
        resp += md_added(commit['added'])
        resp += md_removed(commit['removed'])
        resp += md_modified(commit['modified'])
    resp += update(version)
    return resp
@app.errorhandler(400)
@app.errorhandler(404)
@app.errorhandler(405)
@app.errorhandler(500)
def page_not_found(error):
    """Render the shared error page with the actual error's status code.

    Fix: this handler is registered for 400/405/500 as well, but always
    responded with status 404; now it echoes the real code (falling back
    to 500 for non-HTTP exceptions, which lack a .code attribute).
    """
    code = getattr(error, 'code', 500)
    return render_template('page_not_found.html'), code
def md_added(md_files):
    """Insert newly added markdown files into the article/note tables.

    Returns one human-readable status line per file.
    Fix: md_type(md_file) was recomputed up to four times per file — now
    computed once; the line-continuation SQL literals (which embedded
    indentation whitespace into the statement) are single-line.
    """
    ret = ''
    if not md_files:
        return ret
    for md_file in md_files:
        kind = md_type(md_file)
        if kind == 'Unrelated':
            ret += '%s file %s is ignored\n' % (kind, md_file)
            continue
        raw_md_url = RAW_URL + urllib2.quote(md_file)
        git_url = REPO_URL + urllib2.quote(md_file)
        html_content = mistune_render(raw_md_url)
        title = md_file.split('/')[-1].split('.')[0]
        if kind == 'Article':
            if query_db('select * from article where title = ?', [title]):
                ret += 'Article >%s< already exists.\n' % title
            else:
                db = get_db()
                db.execute('insert into article (title, content, git_url) '
                           'values (?, ?, ?)',
                           [title, html_content, git_url])
                db.commit()
                ret += 'Article >%s< has been added.\n' % title
        elif kind == 'Note':
            tag = md_file.split('/')[1]
            if query_db('select * from note where title = ? and tag = ?',
                        [title, tag]):
                ret += 'Note >%s< already exists.\n' % title
            else:
                db = get_db()
                db.execute('insert into note (title, tag, content, git_url) '
                           'values (?, ?, ?, ?)',
                           [title, tag, html_content, git_url])
                db.commit()
                ret += 'Note >%s< has been added.\n' % title
        else:
            ret += 'Error md_add, you should never get this message.\n'
            continue
    return ret
def md_removed(md_files):
    """Delete removed markdown files from the article/note tables.

    Returns one human-readable status line per file.
    Fix: md_type(md_file) was recomputed up to four times per file — now
    computed once; a dead `pass` statement was dropped.
    NOTE(review): notes are matched by title only, ignoring the tag, so
    same-titled notes under different tags would all be deleted — confirm
    whether that is intended.
    """
    ret = ''
    if not md_files:
        return ret
    for md_file in md_files:
        kind = md_type(md_file)
        if kind == 'Unrelated':
            ret += '%s file %s is ignored.\n' % (kind, md_file)
            continue
        title = md_file.split('/')[-1].split('.')[0]
        if kind == 'Article':
            if query_db('select * from article where title = ?', [title]):
                db = get_db()
                db.execute('delete from article where title = ?', [title])
                db.commit()
                ret += 'Article >%s< has been removed.\n' % title
            else:
                ret += 'Removing article >%s< failed, not exist.\n' % title
        elif kind == 'Note':
            if query_db('select * from note where title = ?', [title]):
                db = get_db()
                db.execute('delete from note where title = ?', [title])
                db.commit()
                ret += 'Note >%s< has been removed.\n' % title
            else:
                ret += 'Removing note >%s< failed, not exist.\n' % title
        else:
            ret += 'Error md_mv, you should never get this message.\n'
            continue
    return ret
def md_modified(md_files):
    """Re-render modified markdown files and update their stored HTML.

    Returns one human-readable status line per file.
    Fix: md_type(md_file) was recomputed up to four times per file — now
    computed once.
    """
    ret = ''
    if not md_files:
        return ret
    for md_file in md_files:
        kind = md_type(md_file)
        if kind == 'Unrelated':
            ret += '%s file %s is ignored\n' % (kind, md_file)
            continue
        raw_md_url = RAW_URL + urllib2.quote(md_file)
        git_url = REPO_URL + urllib2.quote(md_file)
        html_content = mistune_render(raw_md_url)
        title = md_file.split('/')[-1].split('.')[0]
        if kind == 'Article':
            if query_db('select * from article where title = ?', [title]):
                db = get_db()
                db.execute('update article set content = ? where title = ?',
                           [html_content, title])
                db.commit()
                ret += 'Article >%s< has been modified.\n' % title
            else:
                ret += 'Modify article >%s< failed, not exist.\n' % title
        elif kind == 'Note':
            if query_db('select * from note where title = ?', [title]):
                db = get_db()
                db.execute('update note set content = ? where title = ?',
                           [html_content, title])
                db.commit()
                ret += 'Note >%s< has been modified.\n' % title
            else:
                ret += 'Modify note >%s< failed, not exist.\n' % title
        else:
            ret += 'Error md_mod, you should never get this message.\n'
            continue
    return ret
def md_type(md_file):
    """Classify a repository path as 'Article', 'Note', or 'Unrelated'.

    Articles live at Article/<name>.<ext>; notes at Note/<tag>/<name>.<ext>;
    the extension is everything after the FIRST dot (so 'a.tar.md' does not
    match — preserved from the original behavior).
    Fix: an extensionless path (e.g. 'Article/foo') used to raise
    IndexError from split('.', 1)[1]; it is now 'Unrelated'.
    """
    allowed_ext = set(['md', 'markdown', 'mkd'])
    factor = md_file.split('/')
    if len(factor) == 2:
        name = factor[1]
        if factor[0] == 'Article' and '.' in name and \
                name.split('.', 1)[1] in allowed_ext:
            return 'Article'
    elif len(factor) == 3:
        name = factor[2]
        if factor[0] == 'Note' and '.' in name and \
                name.split('.', 1)[1] in allowed_ext:
            return 'Note'
    return 'Unrelated'
def mistune_render(url):
    """Fetch the markdown document at *url* and return it rendered as HTML."""
    markdown_text = urllib2.urlopen(url).read().decode('utf-8')
    return mistune.markdown(markdown_text)
def update(ver):
    """Rewrite the REVISION line in config.py to *ver* and return a status line."""
    rewritten = []
    with open('config.py', 'r') as cfg:
        for line in cfg:
            if line.split(' = ')[0] == 'REVISION':
                rewritten.append('REVISION = \'%s\'\n' % ver)
            else:
                rewritten.append(line)
    with open('config.py', 'w') as cfg:
        cfg.write(''.join(rewritten))
    return 'Update site successfully, current revision: %s\n' % ver
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.