gt
stringclasses
1 value
context
stringlengths
2.49k
119k
# coding: utf-8

"""
    MINDBODY Public API

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: v6

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


class SalePayment(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'int',
        'amount': 'float',
        'method': 'int',
        'type': 'str',
        'notes': 'str'
    }

    attribute_map = {
        'id': 'Id',
        'amount': 'Amount',
        'method': 'Method',
        'type': 'Type',
        'notes': 'Notes'
    }

    def __init__(self, id=None, amount=None, method=None, type=None, notes=None):  # noqa: E501
        """SalePayment - a model defined in Swagger

        :param id: A unique identifier for this payment.
        :param amount: The amount of this payment.
        :param method: The method for this payment.
        :param type: The type of payment.
        :param notes: Notes about this payment.
        """  # noqa: E501
        self._id = None
        self._amount = None
        self._method = None
        self._type = None
        self._notes = None
        self.discriminator = None
        # Only route non-None values through the property setters, matching
        # the generated-model convention of leaving absent fields as None.
        if id is not None:
            self.id = id
        if amount is not None:
            self.amount = amount
        if method is not None:
            self.method = method
        if type is not None:
            self.type = type
        if notes is not None:
            self.notes = notes

    @property
    def id(self):
        """Gets the id of this SalePayment.  # noqa: E501

        A unique identifier for this payment.  # noqa: E501

        :return: The id of this SalePayment.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this SalePayment.

        A unique identifier for this payment.  # noqa: E501

        :param id: The id of this SalePayment.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def amount(self):
        """Gets the amount of this SalePayment.  # noqa: E501

        The amount of this payment.  # noqa: E501

        :return: The amount of this SalePayment.  # noqa: E501
        :rtype: float
        """
        return self._amount

    @amount.setter
    def amount(self, amount):
        """Sets the amount of this SalePayment.

        The amount of this payment.  # noqa: E501

        :param amount: The amount of this SalePayment.  # noqa: E501
        :type: float
        """
        self._amount = amount

    @property
    def method(self):
        """Gets the method of this SalePayment.  # noqa: E501

        The method for this payment.  # noqa: E501

        :return: The method of this SalePayment.  # noqa: E501
        :rtype: int
        """
        return self._method

    @method.setter
    def method(self, method):
        """Sets the method of this SalePayment.

        The method for this payment.  # noqa: E501

        :param method: The method of this SalePayment.  # noqa: E501
        :type: int
        """
        self._method = method

    @property
    def type(self):
        """Gets the type of this SalePayment.  # noqa: E501

        The type of payment.  # noqa: E501

        :return: The type of this SalePayment.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this SalePayment.

        The type of payment.  # noqa: E501

        :param type: The type of this SalePayment.  # noqa: E501
        :type: str
        """
        self._type = type

    @property
    def notes(self):
        """Gets the notes of this SalePayment.  # noqa: E501

        Notes about this payment.  # noqa: E501

        :return: The notes of this SalePayment.  # noqa: E501
        :rtype: str
        """
        return self._notes

    @notes.setter
    def notes(self, notes):
        """Sets the notes of this SalePayment.

        Notes about this payment.  # noqa: E501

        :param notes: The notes of this SalePayment.  # noqa: E501
        :type: str
        """
        self._notes = notes

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # NOTE: dict.items() replaces the former six.iteritems() — identical
        # iteration semantics, without the Python 2 compatibility dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated-code convention: merge raw mapping entries when the model
        # subclasses dict (it does not here, so this branch is a no-op).
        if issubclass(SalePayment, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SalePayment):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database v1 Instances action implementations"""

import argparse

from osc_lib.command import command
from osc_lib import utils as osc_utils
from oslo_utils import uuidutils

from troveclient import exceptions
from troveclient.i18n import _
from troveclient.osc.v1 import base
from troveclient import utils as trove_utils


def get_instances_info(instances):
    """Flatten instance objects into plain dicts for list-style output.

    Works only on data already present on each instance object
    (``to_dict()`` plus a few attributes) so no extra GET requests
    are sent to Trove.

    :param instances: iterable of instance resource objects.
    :return: list of dicts, one per instance, with the derived keys
             ``flavor_id``, ``size``, ``role``, ``datastore``,
             ``datastore_version``, ``public``, ``addresses`` and
             ``operating_status`` normalized for display.
    """
    instances_info = []
    for instance in instances:
        # To avoid invoking GET request to trove.
        instance_info = instance.to_dict()
        instance_info['flavor_id'] = instance.flavor['id']
        # '-' marks instances without volume support.
        instance_info['size'] = '-'
        if 'volume' in instance_info:
            instance_info['size'] = instance_info['volume']['size']
        # Replication role: 'replica' if it follows a source, 'primary' if it
        # has replicas, otherwise empty. 'primary' wins if both keys exist.
        instance_info['role'] = ''
        if 'replica_of' in instance_info:
            instance_info['role'] = 'replica'
        if 'replicas' in instance_info:
            instance_info['role'] = 'primary'
        if 'datastore' in instance_info:
            if instance.datastore.get('version'):
                instance_info['datastore_version'] = instance.\
                    datastore['version']
            instance_info['datastore'] = instance.datastore['type']
        if 'access' in instance_info:
            instance_info['public'] = instance_info["access"].get(
                "is_public", False)
        if 'addresses' not in instance_info:
            instance_info['addresses'] = ''
        if 'operating_status' not in instance_info:
            # In case newer version python-troveclient is talking to older
            # version trove.
            instance_info['operating_status'] = ''
        instances_info.append(instance_info)
    return instances_info


def set_attributes_for_print_detail(instance):
    """Build the flat key/value dict shown by detail (ShowOne) commands.

    Collapses nested attributes (flavor, volume, datastore, replicas,
    networks, fault, access) into scalar display fields and strips
    non-display keys (``links``, ``access``).

    :param instance: an instance resource object.
    :return: dict of printable attributes.
    """
    info = instance.to_dict()
    info['flavor'] = instance.flavor['id']
    if hasattr(instance, 'volume'):
        info['volume'] = instance.volume['size']
        if 'used' in instance.volume:
            info['volume_used'] = instance.volume['used']
    if hasattr(instance, 'ip'):
        info['ip'] = ', '.join(instance.ip)
    if hasattr(instance, 'datastore'):
        info['datastore'] = instance.datastore['type']
        info['datastore_version'] = instance.datastore['version']
        info['datastore_version_number'] = instance.datastore.get(
            'version_number')
    if hasattr(instance, 'configuration'):
        info['configuration'] = instance.configuration['id']
    if hasattr(instance, 'replica_of'):
        info['replica_of'] = instance.replica_of['id']
    if hasattr(instance, 'replicas'):
        replicas = [replica['id'] for replica in instance.replicas]
        info['replicas'] = ', '.join(replicas)
    if hasattr(instance, 'networks'):
        info['networks'] = instance.networks['name']
        info['networks_id'] = instance.networks['id']
    if hasattr(instance, 'fault'):
        # Re-insert 'fault' so the flattened message/date/details replace the
        # raw nested dict from to_dict().
        info.pop('fault', None)
        info['fault'] = instance.fault['message']
        info['fault_date'] = instance.fault['created']
        if 'details' in instance.fault and instance.fault['details']:
            info['fault_details'] = instance.fault['details']
    if hasattr(instance, 'access'):
        info['public'] = instance.access.get("is_public", False)
        info['allowed_cidrs'] = instance.access.get('allowed_cidrs', [])
        info.pop("access", None)
    info.pop('links', None)
    return info


class ListDatabaseInstances(command.Lister):
    _description = _("List database instances")
    # Column headers for regular users; admins additionally see server and
    # tenant IDs (admin_columns) when listing across projects.
    columns = ['ID', 'Name', 'Datastore', 'Datastore Version', 'Status',
               'Operating Status', 'Public', 'Addresses', 'Flavor ID', 'Size',
               'Role']
    admin_columns = columns + ["Server ID", "Tenant ID"]

    def get_parser(self, prog_name):
        parser = super(ListDatabaseInstances, self).get_parser(prog_name)
        parser.add_argument(
            '--limit',
            dest='limit',
            metavar='<limit>',
            default=None,
            help=_('Limit the number of results displayed.')
        )
        parser.add_argument(
            '--marker',
            dest='marker',
            metavar='<ID>',
            type=str,
            default=None,
            help=_('Begin displaying the results for IDs greater than the'
                   'specified marker. When used with ``--limit``, set '
                   'this to the last ID displayed in the previous run.')
        )
        parser.add_argument(
            '--include_clustered', '--include-clustered',
            dest='include_clustered',
            action="store_true",
            default=False,
            help=_("Include instances that are part of a cluster "
                   "(default %(default)s). --include-clustered may be "
                   "deprecated in the future, retaining just "
                   "--include_clustered.")
        )
        parser.add_argument(
            '--all-projects',
            dest='all_projects',
            action="store_true",
            default=False,
            help=_("Include database instances of all projects (admin only)")
        )
        parser.add_argument(
            '--project-id',
            help=_("Include database instances of a specific project "
                   "(admin only)")
        )
        return parser

    def take_action(self, parsed_args):
        """List instances; admin scopes go through the mgmt endpoint."""
        extra_params = {}
        if parsed_args.all_projects or parsed_args.project_id:
            # Cross-project listing requires the management API and shows the
            # extended admin column set.
            db_instances = self.app.client_manager.database.mgmt_instances
            cols = self.admin_columns
            if parsed_args.project_id:
                extra_params['project_id'] = parsed_args.project_id
        else:
            db_instances = self.app.client_manager.database.instances
            cols = self.columns

        instances = db_instances.list(
            limit=parsed_args.limit,
            marker=parsed_args.marker,
            include_clustered=parsed_args.include_clustered,
            **extra_params
        )
        if instances:
            instances_info = get_instances_info(instances)
            instances = [osc_utils.get_dict_properties(info, cols)
                         for info in instances_info]

        return cols, instances


class ShowDatabaseInstance(command.ShowOne):
    _description = _("Show instance details")

    def get_parser(self, prog_name):
        parser = super(ShowDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            help=_('Instance (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Look up one instance and return its flattened detail fields."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances, parsed_args.instance)
        instance = set_attributes_for_print_detail(instance)
        # ShowOne expects (field_names, field_values) tuples, sorted by key.
        return zip(*sorted(instance.items()))


class DeleteDatabaseInstance(base.TroveDeleter):
    _description = _("Deletes an instance.")

    def get_parser(self, prog_name):
        parser = super(DeleteDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            nargs='+',
            metavar='instance',
            help='Id or name of instance(s).'
        )
        parser.add_argument(
            '--force',
            action="store_true",
            default=False,
            help=_('Force delete the instance, will reset the instance '
                   'status before deleting.'),
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve each name to an ID and batch-delete via TroveDeleter."""
        db_instances = self.app.client_manager.database.instances

        # Used for batch deletion
        self.delete_func = (db_instances.force_delete if parsed_args.force
                            else db_instances.delete)
        self.resource = 'database instance'

        ids = []
        for instance_id in parsed_args.instance:
            if not uuidutils.is_uuid_like(instance_id):
                try:
                    instance_id = trove_utils.get_resource_id_by_name(
                        db_instances, instance_id
                    )
                except Exception as e:
                    msg = ("Failed to get database instance %s, error: %s" %
                           (instance_id, str(e)))
                    raise exceptions.CommandError(msg)
            ids.append(instance_id)

        self.delete_resources(ids)


class CreateDatabaseInstance(command.ShowOne):
    _description = _("Creates a new database instance.")

    def get_parser(self, prog_name):
        parser = super(CreateDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help=_("Name of the instance."),
        )
        parser.add_argument(
            '--flavor',
            metavar='<flavor>',
            type=str,
            help=_("Flavor to create the instance (name or ID). Flavor is "
                   "not required when creating replica instances."),
        )
        parser.add_argument(
            '--size',
            metavar='<size>',
            type=int,
            default=None,
            help=_("Size of the instance disk volume in GB. "
                   "Required when volume support is enabled."),
        )
        parser.add_argument(
            '--volume-type',
            metavar='<volume_type>',
            type=str,
            default=None,
            help=_("Volume type. Optional when volume support is enabled."),
        )
        parser.add_argument(
            '--databases',
            metavar='<database>',
            nargs="+",
            default=[],
            help=_("Optional list of databases."),
        )
        parser.add_argument(
            '--users',
            metavar='<user:password>',
            nargs="+",
            default=[],
            help=_("Optional list of users."),
        )
        parser.add_argument(
            '--backup',
            metavar='<backup>',
            default=None,
            help=_("A backup name or ID."),
        )
        parser.add_argument(
            '--availability-zone',
            metavar='<availability_zone>',
            default=None,
            help=_("The Zone hint to give to Nova."),
        )
        parser.add_argument(
            '--datastore',
            metavar='<datastore>',
            default=None,
            help=_("A datastore name or ID."),
        )
        parser.add_argument(
            '--datastore-version',
            metavar='<datastore_version>',
            default=None,
            help=_("A datastore version name or ID."),
        )
        parser.add_argument(
            '--datastore-version-number',
            default=None,
            help=_('The version number for the database. The version number '
                   'is needed for the datastore versions with the same '
                   'name.'),
        )
        parser.add_argument(
            '--nic',
            metavar=('<net-id=<net-uuid>,subnet-id=<subnet-uuid>,'
                     'ip-address=<ip-address>>'),
            dest='nics',
            help=_("Create instance in the given Neutron network. This "
                   "information is used for creating user-facing port for "
                   "the instance. Either network ID or subnet ID (or both) "
                   "should be specified, IP address is optional"),
        )
        parser.add_argument(
            '--configuration',
            metavar='<configuration>',
            default=None,
            help=_("ID of the configuration group to attach to the "
                   "instance."),
        )
        parser.add_argument(
            '--replica-of',
            metavar='<source_instance>',
            default=None,
            help=_("ID or name of an existing instance to replicate from."),
        )
        parser.add_argument(
            '--replica-count',
            metavar='<count>',
            type=int,
            default=None,
            help=_("Number of replicas to create (defaults to 1 if "
                   "replica_of specified)."),
        )
        parser.add_argument(
            '--module',
            metavar='<module>',
            type=str,
            dest='modules',
            action='append',
            default=[],
            help=_("ID or name of the module to apply. Specify multiple "
                   "times to apply multiple modules."),
        )
        parser.add_argument(
            '--locality',
            metavar='<policy>',
            default=None,
            choices=['affinity', 'anti-affinity'],
            help=_("Locality policy to use when creating replicas. Choose "
                   "one of %(choices)s."),
        )
        parser.add_argument(
            '--region',
            metavar='<region>',
            type=str,
            default=None,
            # Hidden option (argparse.SUPPRESS): not advertised in --help.
            help=argparse.SUPPRESS,
        )
        parser.add_argument(
            '--is-public',
            action='store_true',
            help="Whether or not to make the instance public.",
        )
        parser.add_argument(
            '--allowed-cidr',
            action='append',
            dest='allowed_cidrs',
            help="The IP CIDRs that are allowed to access the database "
                 "instance. Repeat for multiple values",
        )
        return parser

    def take_action(self, parsed_args):
        """Validate arguments, assemble the create payload and show details.

        :raises exceptions.CommandError: no flavor for a non-replica create.
        :raises exceptions.ValidationError: bad volume size, bad --nic key,
            or --locality combined with --replica-of.
        """
        database = self.app.client_manager.database
        db_instances = database.instances

        # Flavor is mandatory unless the instance is a replica (replicas
        # inherit the source's flavor; an explicit one is ignored).
        if not parsed_args.replica_of and not parsed_args.flavor:
            raise exceptions.CommandError(_("Please specify a flavor"))
        if parsed_args.replica_of and parsed_args.flavor:
            print("Warning: Flavor is ignored for creating replica.")
        if not parsed_args.replica_of:
            flavor_id = osc_utils.find_resource(
                database.flavors, parsed_args.flavor).id
        else:
            flavor_id = None

        volume = None
        if parsed_args.size is not None and parsed_args.size <= 0:
            raise exceptions.ValidationError(
                _("Volume size '%s' must be an integer and greater than 0.")
                % parsed_args.size)
        elif parsed_args.size:
            volume = {"size": parsed_args.size,
                      "type": parsed_args.volume_type}

        restore_point = None
        if parsed_args.backup:
            restore_point = {"backupRef": osc_utils.find_resource(
                database.backups, parsed_args.backup).id}

        replica_of = None
        replica_count = parsed_args.replica_count
        if parsed_args.replica_of:
            replica_of = osc_utils.find_resource(
                db_instances, parsed_args.replica_of)
            # Default to a single replica when a source is given.
            replica_count = replica_count or 1

        locality = None
        if parsed_args.locality:
            locality = parsed_args.locality
            if replica_of:
                raise exceptions.ValidationError(
                    _('Cannot specify locality when adding replicas '
                      'to existing master.'))

        databases = [{'name': value} for value in parsed_args.databases]
        # Each --users entry is 'name:password'; every user is granted access
        # to all databases listed above.
        users = [{'name': n, 'password': p, 'databases': databases} for (n, p)
                 in [z.split(':')[:2] for z in parsed_args.users]]

        nics = []
        if parsed_args.nics:
            # Parse the single --nic value 'k1=v1,k2=v2,...' into the API's
            # network_id/subnet_id/ip_address keys.
            nic_info = {}
            allowed_keys = {
                'net-id': 'network_id',
                'subnet-id': 'subnet_id',
                'ip-address': 'ip_address'
            }
            fields = parsed_args.nics.split(',')
            for field in fields:
                field = field.strip()
                k, v = field.split('=', 1)
                k = k.strip()
                v = v.strip()
                if k not in allowed_keys.keys():
                    raise exceptions.ValidationError(
                        f"{k} is not allowed."
                    )
                if v:
                    nic_info[allowed_keys[k]] = v
            nics.append(nic_info)

        modules = []
        for module in parsed_args.modules:
            modules.append(osc_utils.find_resource(database.modules,
                                                   module).id)

        access = {'is_public': False}
        if parsed_args.is_public:
            access['is_public'] = True
        if parsed_args.allowed_cidrs:
            access['allowed_cidrs'] = parsed_args.allowed_cidrs

        instance = db_instances.create(
            parsed_args.name,
            flavor_id=flavor_id,
            volume=volume,
            databases=databases,
            users=users,
            restorePoint=restore_point,
            availability_zone=(parsed_args.availability_zone),
            datastore=parsed_args.datastore,
            datastore_version=(parsed_args.datastore_version),
            datastore_version_number=(parsed_args.datastore_version_number),
            nics=nics,
            configuration=parsed_args.configuration,
            replica_of=replica_of,
            replica_count=replica_count,
            modules=modules,
            locality=locality,
            region_name=parsed_args.region,
            access=access
        )
        instance = set_attributes_for_print_detail(instance)
        return zip(*sorted(instance.items()))


class ResetDatabaseInstanceStatus(command.Command):
    _description = _("Set instance service status to ERROR and clear the "
                     "current task status. Mark any running backup "
                     "operations as FAILED.")

    def get_parser(self, prog_name):
        parser = super(ResetDatabaseInstanceStatus, self).get_parser(
            prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            help=_('ID or name of the instance'),
        )
        return parser

    def take_action(self, parsed_args):
        """Reset the status of one instance."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.reset_status(instance)


class ResizeDatabaseInstanceFlavor(command.Command):
    _description = _("Resize an instance with a new flavor")

    def get_parser(self, prog_name):
        parser = super(ResizeDatabaseInstanceFlavor, self).get_parser(
            prog_name
        )
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance')
        )
        parser.add_argument(
            'flavor',
            type=str,
            help=_('ID or name of the new flavor.')
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve instance and flavor, then request the flavor resize."""
        instance_mgr = self.app.client_manager.database.instances
        flavor_mgr = self.app.client_manager.database.flavors

        instance_id = parsed_args.instance
        if not uuidutils.is_uuid_like(instance_id):
            instance = osc_utils.find_resource(instance_mgr, instance_id)
            instance_id = instance.id

        flavor = osc_utils.find_resource(flavor_mgr, parsed_args.flavor)
        instance_mgr.resize_instance(instance_id, flavor.id)


class UpgradeDatabaseInstance(command.Command):
    _description = _("Upgrades an instance to a new datastore version.")

    def get_parser(self, prog_name):
        parser = super(UpgradeDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'),
        )
        parser.add_argument(
            'datastore_version',
            metavar='<datastore_version>',
            help=_('ID or name of the datastore version.'),
        )
        return parser

    def take_action(self, parsed_args):
        """Trigger a datastore version upgrade for the instance."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.upgrade(instance, parsed_args.datastore_version)


class ResizeDatabaseInstanceVolume(command.Command):
    _description = _("Resizes the volume size of an instance.")

    def get_parser(self, prog_name):
        parser = super(ResizeDatabaseInstanceVolume, self).get_parser(
            prog_name
        )
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.')
        )
        parser.add_argument(
            'size',
            metavar='<size>',
            type=int,
            default=None,
            help=_('New size of the instance disk volume in GB.')
        )
        return parser

    def take_action(self, parsed_args):
        """Request a volume resize for the instance."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.resize_volume(instance, parsed_args.size)


class ForceDeleteDatabaseInstance(command.Command):
    _description = _("Force delete an instance.")

    def get_parser(self, prog_name):
        parser = (
            super(ForceDeleteDatabaseInstance, self).get_parser(prog_name))
        parser.add_argument(
            'instance',
            metavar='<instance>',
            help=_('ID or name of the instance'),
        )
        return parser

    def take_action(self, parsed_args):
        """Reset the instance status first, then delete it."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        # Reset clears any stuck task status so the delete can proceed.
        db_instances.reset_status(instance)
        try:
            db_instances.delete(instance)
        except Exception as e:
            msg = (_("Failed to delete instance %(instance)s: %(e)s")
                   % {'instance': parsed_args.instance, 'e': e})
            raise exceptions.CommandError(msg)


class PromoteDatabaseInstanceToReplicaSource(command.Command):
    _description = _(
        "Promotes a replica to be the new replica source of its set.")

    def get_parser(self, prog_name):
        parser = super(PromoteDatabaseInstanceToReplicaSource,
                       self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'),
        )
        return parser

    def take_action(self, parsed_args):
        """Promote the given replica to replica source."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.promote_to_replica_source(instance)


class RestartDatabaseInstance(command.Command):
    _description = _("Restarts an instance.")

    def get_parser(self, prog_name):
        parser = super(RestartDatabaseInstance, self).get_parser(
            prog_name
        )
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.')
        )
        return parser

    def take_action(self, parsed_args):
        """Restart the database service on the instance."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.restart(instance)


class EjectDatabaseInstanceReplicaSource(command.Command):
    _description = _("Ejects a replica source from its set.")

    def get_parser(self, prog_name):
        parser = super(EjectDatabaseInstanceReplicaSource, self).get_parser(
            prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'),
        )
        return parser

    def take_action(self, parsed_args):
        """Eject the replica source from its replication set."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.eject_replica_source(instance)


class UpdateDatabaseInstance(command.Command):
    _description = _("Updates an instance: Edits name, "
                     "configuration, or replica source.")

    def get_parser(self, prog_name):
        parser = super(UpdateDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            type=str,
            default=None,
            help=_('ID or name of the instance.'),
        )
        parser.add_argument(
            '--configuration',
            metavar='<configuration>',
            type=str,
            default=None,
            help=_('ID of the configuration reference to attach.'),
        )
        parser.add_argument(
            '--detach-replica-source',
            '--detach_replica_source',
            dest='detach_replica_source',
            action="store_true",
            default=False,
            help=_('Detach the replica instance from its replication '
                   'source. '
                   '--detach-replica-source may be deprecated in the future '
                   'in favor of just --detach_replica_source'),
        )
        parser.add_argument(
            '--remove-configuration',
            '--remove_configuration',
            dest='remove_configuration',
            action="store_true",
            default=False,
            help=_('Drops the current configuration reference.'),
        )
        # --is-public / --is-private both write parsed_args.public; the
        # mutually-exclusive group forbids passing both, and None means
        # "leave unchanged".
        public_group = parser.add_mutually_exclusive_group()
        public_group.add_argument(
            '--is-public',
            dest='public',
            default=None,
            action='store_true',
            help="Make the database instance accessible to public.",
        )
        public_group.add_argument(
            '--is-private',
            dest='public',
            default=None,
            action='store_false',
            help="Make the database instance inaccessible to public.",
        )
        parser.add_argument(
            '--allowed-cidr',
            action='append',
            dest='allowed_cidrs',
            help="The IP CIDRs that are allowed to access the database "
                 "instance. Repeat for multiple values",
        )
        return parser

    def take_action(self, parsed_args):
        """Apply the requested attribute updates to the instance."""
        instance_mgr = self.app.client_manager.database.instances
        instance_id = parsed_args.instance
        if not uuidutils.is_uuid_like(instance_id):
            # NOTE(review): find_resource returns a Resource object here, not
            # an ID string (unlike ResizeDatabaseInstanceFlavor, which takes
            # .id). Presumably the manager accepts either — confirm.
            instance_id = osc_utils.find_resource(instance_mgr, instance_id)
        instance_mgr.update(instance_id,
                            parsed_args.configuration,
                            parsed_args.name,
                            parsed_args.detach_replica_source,
                            parsed_args.remove_configuration,
                            is_public=parsed_args.public,
                            allowed_cidrs=parsed_args.allowed_cidrs)


class DetachDatabaseInstanceReplica(command.Command):
    _description = _("Detaches a replica instance "
                     "from its replication source.")

    def get_parser(self, prog_name):
        parser = super(DetachDatabaseInstanceReplica, self).get_parser(
            prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'),
        )
        return parser

    def take_action(self, parsed_args):
        """Detach the replica via the generic update call."""
        db_instances = self.app.client_manager.database.instances
        instance = osc_utils.find_resource(db_instances,
                                           parsed_args.instance)
        db_instances.update(instance, detach_replica_source=True)


class RebootDatabaseInstance(command.Command):
    _description = _("Reboots an instance(the Nova server).")

    def get_parser(self, prog_name):
        parser = super(RebootDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'))
        return parser

    def take_action(self, parsed_args):
        """Reboot the underlying Nova server via the management API."""
        instance_id = parsed_args.instance
        if not uuidutils.is_uuid_like(instance_id):
            instance_mgr = self.app.client_manager.database.instances
            # NOTE(review): the Resource object (not .id) is passed on to the
            # mgmt reboot call — presumably accepted by the manager; confirm.
            instance_id = osc_utils.find_resource(instance_mgr, instance_id)

        mgmt_instance_mgr = self.app.client_manager.database.mgmt_instances
        mgmt_instance_mgr.reboot(instance_id)


class RebuildDatabaseInstance(command.Command):
    _description = _("Rebuilds an instance(the Nova server).")

    def get_parser(self, prog_name):
        parser = super(RebuildDatabaseInstance, self).get_parser(prog_name)
        parser.add_argument(
            'instance',
            metavar='<instance>',
            type=str,
            help=_('ID or name of the instance.'))
        parser.add_argument(
            'image',
            metavar='<image-id>',
            help=_('ID of the new guest image.'))
        return parser

    def take_action(self, parsed_args):
        """Rebuild the underlying Nova server with a new guest image
        (admin/management operation)."""
        instance_id = parsed_args.instance
        if not uuidutils.is_uuid_like(instance_id):
            instance_mgr = self.app.client_manager.database.instances
            instance_id = osc_utils.find_resource(instance_mgr, instance_id)

        mgmt_instance_mgr = self.app.client_manager.database.mgmt_instances
        mgmt_instance_mgr.rebuild(instance_id, parsed_args.image)
# -*- coding: iso-8859-15 -*- # # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import unittest import datetime import ast import pytest from pathlib import Path from lingua_franca import load_language from lingua_franca.internal import UnsupportedLanguageError from mycroft.configuration import Configuration, set_default_lf_lang from mycroft.util.format import ( TimeResolution, nice_number, nice_time, nice_date, nice_date_time, nice_year, nice_duration, nice_duration_dt, pronounce_number, date_time_format, join_list ) # The majority of these tests are explicitly written for English. # Changes to the default language are tested below. 
default_lang = "en-us" load_language(default_lang) NUMBERS_FIXTURE_EN = { 1.435634: '1.436', 2: '2', 5.0: '5', 0.027: '0.027', 0.5: 'a half', 1.333: '1 and a third', 2.666: '2 and 2 thirds', 0.25: 'a forth', 1.25: '1 and a forth', 0.75: '3 forths', 1.75: '1 and 3 forths', 3.4: '3 and 2 fifths', 16.8333: '16 and 5 sixths', 12.5714: '12 and 4 sevenths', 9.625: '9 and 5 eigths', 6.777: '6 and 7 ninths', 3.1: '3 and a tenth', 2.272: '2 and 3 elevenths', 5.583: '5 and 7 twelveths', 8.384: '8 and 5 thirteenths', 0.071: 'a fourteenth', 6.466: '6 and 7 fifteenths', 8.312: '8 and 5 sixteenths', 2.176: '2 and 3 seventeenths', 200.722: '200 and 13 eighteenths', 7.421: '7 and 8 nineteenths', 0.05: 'a twentyith' } class TestNiceNumberFormat(unittest.TestCase): def test_convert_float_to_nice_number(self): for number, number_str in NUMBERS_FIXTURE_EN.items(): self.assertEqual(nice_number(number), number_str, 'should format {} as {} and not {}'.format( number, number_str, nice_number(number))) def test_specify_denominator(self): self.assertEqual(nice_number(5.5, denominators=[1, 2, 3]), '5 and a half', 'should format 5.5 as 5 and a half not {}'.format( nice_number(5.5, denominators=[1, 2, 3]))) self.assertEqual(nice_number(2.333, denominators=[1, 2]), '2.333', 'should format 2.333 as 2.333 not {}'.format( nice_number(2.333, denominators=[1, 2]))) def test_no_speech(self): self.assertEqual(nice_number(6.777, speech=False), '6 7/9', 'should format 6.777 as 6 7/9 not {}'.format( nice_number(6.777, speech=False))) self.assertEqual(nice_number(6.0, speech=False), '6', 'should format 6.0 as 6 not {}'.format( nice_number(6.0, speech=False))) def test_unknown_language(self): """ An unknown / unhandled language should return the string representation of the input number. 
""" self.assertEqual(nice_number(5.5, lang='as-fd'), '5.5', 'should format 5.5 as 5.5 not {}'.format( nice_number(5.5, lang='as-df'))) class TestPronounceNumber(unittest.TestCase): def test_convert_int(self): self.assertEqual(pronounce_number(0), "zero") self.assertEqual(pronounce_number(1), "one") self.assertEqual(pronounce_number(10), "ten") self.assertEqual(pronounce_number(15), "fifteen") self.assertEqual(pronounce_number(20), "twenty") self.assertEqual(pronounce_number(27), "twenty seven") self.assertEqual(pronounce_number(30), "thirty") self.assertEqual(pronounce_number(33), "thirty three") def test_convert_negative_int(self): self.assertEqual(pronounce_number(-1), "minus one") self.assertEqual(pronounce_number(-10), "minus ten") self.assertEqual(pronounce_number(-15), "minus fifteen") self.assertEqual(pronounce_number(-20), "minus twenty") self.assertEqual(pronounce_number(-27), "minus twenty seven") self.assertEqual(pronounce_number(-30), "minus thirty") self.assertEqual(pronounce_number(-33), "minus thirty three") def test_convert_decimals(self): self.assertEqual(pronounce_number(1.234), "one point two three") self.assertEqual(pronounce_number(21.234), "twenty one point two three") self.assertEqual(pronounce_number(21.234, places=1), "twenty one point two") self.assertEqual(pronounce_number(21.234, places=0), "twenty one") self.assertEqual(pronounce_number(21.234, places=3), "twenty one point two three four") self.assertEqual(pronounce_number(21.234, places=4), "twenty one point two three four") self.assertEqual(pronounce_number(21.234, places=5), "twenty one point two three four") self.assertEqual(pronounce_number(-1.234), "minus one point two three") self.assertEqual(pronounce_number(-21.234), "minus twenty one point two three") self.assertEqual(pronounce_number(-21.234, places=1), "minus twenty one point two") self.assertEqual(pronounce_number(-21.234, places=0), "minus twenty one") self.assertEqual(pronounce_number(-21.234, places=3), "minus twenty one 
point two three four") self.assertEqual(pronounce_number(-21.234, places=4), "minus twenty one point two three four") self.assertEqual(pronounce_number(-21.234, places=5), "minus twenty one point two three four") def test_convert_hundreds(self): self.assertEqual(pronounce_number(100), "one hundred") self.assertEqual(pronounce_number(666), "six hundred and sixty six") self.assertEqual(pronounce_number(1456), "fourteen fifty six") self.assertEqual(pronounce_number(103254654), "one hundred and three " "million, two hundred " "and fifty four " "thousand, six hundred " "and fifty four") self.assertEqual(pronounce_number(1512457), "one million, five hundred" " and twelve thousand, " "four hundred and fifty " "seven") self.assertEqual(pronounce_number(209996), "two hundred and nine " "thousand, nine hundred " "and ninety six") self.assertEqual(pronounce_number(95505896639631893), "ninety five quadrillion, five hundred and five " "trillion, eight hundred and ninety six billion, six " "hundred and thirty nine million, six hundred and " "thirty one thousand, eight hundred and ninety three") self.assertEqual(pronounce_number(95505896639631893, short_scale=False), "ninety five thousand five hundred and five billion, " "eight hundred and ninety six thousand six hundred " "and thirty nine million, six hundred and thirty one " "thousand, eight hundred and ninety three") def test_convert_scientific_notation(self): self.assertEqual(pronounce_number(0, scientific=True), "zero") self.assertEqual(pronounce_number(33, scientific=True), "three point three times ten to the power of one") self.assertEqual(pronounce_number(299792458, scientific=True), "two point nine nine times ten to the power of eight") self.assertEqual(pronounce_number(299792458, places=6, scientific=True), "two point nine nine seven nine two five times " "ten to the power of eight") self.assertEqual(pronounce_number(1.672e-27, places=3, scientific=True), "one point six seven two times ten to the power of " "negative 
twenty seven") def test_large_numbers(self): self.assertEqual( pronounce_number(299792458, short_scale=True), "two hundred and ninety nine million, seven hundred " "and ninety two thousand, four hundred and fifty eight") self.assertEqual( pronounce_number(299792458, short_scale=False), "two hundred and ninety nine million, seven hundred " "and ninety two thousand, four hundred and fifty eight") self.assertEqual( pronounce_number(100034000000299792458, short_scale=True), "one hundred quintillion, thirty four quadrillion, " "two hundred and ninety nine million, seven hundred " "and ninety two thousand, four hundred and fifty eight") self.assertEqual( pronounce_number(100034000000299792458, short_scale=False), "one hundred trillion, thirty four thousand billion, " "two hundred and ninety nine million, seven hundred " "and ninety two thousand, four hundred and fifty eight") self.assertEqual( pronounce_number(10000000000, short_scale=True), "ten billion") self.assertEqual( pronounce_number(1000000000000, short_scale=True), "one trillion") # TODO maybe beautify this self.assertEqual( pronounce_number(1000001, short_scale=True), "one million, one") # def nice_time(dt, lang="en-us", speech=True, use_24hour=False, # use_ampm=False): class TestNiceDateFormat(unittest.TestCase): @classmethod def setUpClass(cls): # Read date_time_test.json files for test data cls.test_config = {} p = Path(date_time_format.config_path) for sub_dir in [x for x in p.iterdir() if x.is_dir()]: if (sub_dir / 'date_time_test.json').exists(): print("Getting test for " + str(sub_dir / 'date_time_test.json')) with (sub_dir / 'date_time_test.json').open() as f: cls.test_config[sub_dir.parts[-1]] = json.loads(f.read()) def test_convert_times(self): dt = datetime.datetime(2017, 1, 31, 13, 22, 3) # Verify defaults haven't changed self.assertEqual(nice_time(dt), nice_time(dt, "en-us", True, False, False)) self.assertEqual(nice_time(dt), "one twenty two") self.assertEqual(nice_time(dt, use_ampm=True), "one 
twenty two p.m.") self.assertEqual(nice_time(dt, speech=False), "1:22") self.assertEqual(nice_time(dt, speech=False, use_ampm=True), "1:22 PM") self.assertEqual(nice_time(dt, speech=False, use_24hour=True), "13:22") self.assertEqual(nice_time(dt, speech=False, use_24hour=True, use_ampm=True), "13:22") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True), "thirteen twenty two") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False), "thirteen twenty two") dt = datetime.datetime(2017, 1, 31, 13, 0, 3) self.assertEqual(nice_time(dt), "one o'clock") self.assertEqual(nice_time(dt, use_ampm=True), "one p.m.") self.assertEqual(nice_time(dt, speech=False), "1:00") self.assertEqual(nice_time(dt, speech=False, use_ampm=True), "1:00 PM") self.assertEqual(nice_time(dt, speech=False, use_24hour=True), "13:00") self.assertEqual(nice_time(dt, speech=False, use_24hour=True, use_ampm=True), "13:00") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True), "thirteen hundred") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False), "thirteen hundred") dt = datetime.datetime(2017, 1, 31, 13, 2, 3) self.assertEqual(nice_time(dt), "one oh two") self.assertEqual(nice_time(dt, use_ampm=True), "one oh two p.m.") self.assertEqual(nice_time(dt, speech=False), "1:02") self.assertEqual(nice_time(dt, speech=False, use_ampm=True), "1:02 PM") self.assertEqual(nice_time(dt, speech=False, use_24hour=True), "13:02") self.assertEqual(nice_time(dt, speech=False, use_24hour=True, use_ampm=True), "13:02") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True), "thirteen zero two") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False), "thirteen zero two") dt = datetime.datetime(2017, 1, 31, 0, 2, 3) self.assertEqual(nice_time(dt), "twelve oh two") self.assertEqual(nice_time(dt, use_ampm=True), "twelve oh two a.m.") self.assertEqual(nice_time(dt, speech=False), "12:02") self.assertEqual(nice_time(dt, speech=False, use_ampm=True), "12:02 AM") 
self.assertEqual(nice_time(dt, speech=False, use_24hour=True), "00:02") self.assertEqual(nice_time(dt, speech=False, use_24hour=True, use_ampm=True), "00:02") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True), "zero zero zero two") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False), "zero zero zero two") dt = datetime.datetime(2018, 2, 8, 1, 2, 33) self.assertEqual(nice_time(dt), "one oh two") self.assertEqual(nice_time(dt, use_ampm=True), "one oh two a.m.") self.assertEqual(nice_time(dt, speech=False), "1:02") self.assertEqual(nice_time(dt, speech=False, use_ampm=True), "1:02 AM") self.assertEqual(nice_time(dt, speech=False, use_24hour=True), "01:02") self.assertEqual(nice_time(dt, speech=False, use_24hour=True, use_ampm=True), "01:02") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True), "zero one zero two") self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False), "zero one zero two") dt = datetime.datetime(2017, 1, 31, 12, 15, 9) self.assertEqual(nice_time(dt), "quarter past twelve") self.assertEqual(nice_time(dt, use_ampm=True), "quarter past twelve p.m.") dt = datetime.datetime(2017, 1, 31, 5, 30, 00) self.assertEqual(nice_time(dt, use_ampm=True), "half past five a.m.") dt = datetime.datetime(2017, 1, 31, 1, 45, 00) self.assertEqual(nice_time(dt), "quarter to two") def test_nice_date(self): for lang in self.test_config: set_default_lf_lang(lang) i = 1 while (self.test_config[lang].get('test_nice_date') and self.test_config[lang]['test_nice_date'].get(str(i))): p = self.test_config[lang]['test_nice_date'][str(i)] dp = ast.literal_eval(p['datetime_param']) np = ast.literal_eval(p['now']) dt = datetime.datetime( dp[0], dp[1], dp[2], dp[3], dp[4], dp[5]) now = None if not np else datetime.datetime( np[0], np[1], np[2], np[3], np[4], np[5]) print('Testing for ' + lang + ' that ' + str(dt) + ' is date ' + p['assertEqual']) self.assertEqual(p['assertEqual'], nice_date(dt, lang=lang, now=now)) i = i + 1 # test all days in a 
year for all languages, # that some output is produced for lang in self.test_config: set_default_lf_lang(lang) for dt in (datetime.datetime(2017, 12, 30, 0, 2, 3) + datetime.timedelta(n) for n in range(368)): self.assertTrue(len(nice_date(dt, lang=lang)) > 0) set_default_lf_lang(default_lang) def test_nice_date_time(self): for lang in self.test_config: set_default_lf_lang(lang) i = 1 while (self.test_config[lang].get('test_nice_date_time') and self.test_config[lang]['test_nice_date_time'].get(str(i))): p = self.test_config[lang]['test_nice_date_time'][str(i)] dp = ast.literal_eval(p['datetime_param']) np = ast.literal_eval(p['now']) dt = datetime.datetime( dp[0], dp[1], dp[2], dp[3], dp[4], dp[5]) now = None if not np else datetime.datetime( np[0], np[1], np[2], np[3], np[4], np[5]) print('Testing for ' + lang + ' that ' + str(dt) + ' is date time ' + p['assertEqual']) self.assertEqual( p['assertEqual'], nice_date_time( dt, lang=lang, now=now, use_24hour=ast.literal_eval(p['use_24hour']), use_ampm=ast.literal_eval(p['use_ampm']))) i = i + 1 set_default_lf_lang(default_lang) def test_nice_year(self): for lang in self.test_config: set_default_lf_lang(lang) i = 1 while (self.test_config[lang].get('test_nice_year') and self.test_config[lang]['test_nice_year'].get(str(i))): p = self.test_config[lang]['test_nice_year'][str(i)] dp = ast.literal_eval(p['datetime_param']) dt = datetime.datetime( dp[0], dp[1], dp[2], dp[3], dp[4], dp[5]) print('Testing for ' + lang + ' that ' + str(dt) + ' is year ' + p['assertEqual']) self.assertEqual(p['assertEqual'], nice_year( dt, lang=lang, bc=ast.literal_eval(p['bc']))) i = i + 1 set_default_lf_lang(default_lang) # Test all years from 0 to 9999 for all languages, # that some output is produced for lang in self.test_config: set_default_lf_lang(lang) print("Test all years in " + lang) for i in range(1, 9999): dt = datetime.datetime(i, 1, 31, 13, 2, 3) self.assertTrue(len(nice_year(dt, lang=lang)) > 0) # Looking through the date sequence 
can be helpful # print(nice_year(dt, lang=lang)) set_default_lf_lang(default_lang) def test_join(self): self.assertEqual(join_list(None, "and"), "") self.assertEqual(join_list([], "and"), "") self.assertEqual(join_list(["a"], "and"), "a") self.assertEqual(join_list(["a", "b"], "and"), "a and b") self.assertEqual(join_list(["a", "b"], "or"), "a or b") self.assertEqual(join_list(["a", "b", "c"], "and"), "a, b and c") self.assertEqual(join_list(["a", "b", "c"], "or"), "a, b or c") self.assertEqual(join_list(["a", "b", "c"], "or", ";"), "a; b or c") self.assertEqual(join_list(["a", "b", "c", "d"], "or"), "a, b, c or d") self.assertEqual(join_list([1, "b", 3, "d"], "or"), "1, b, 3 or d") class TestNiceDurationFuncs(unittest.TestCase): def test_nice_duration(self): self.assertEqual(nice_duration(1), "one second") self.assertEqual(nice_duration(3), "three seconds") self.assertEqual(nice_duration(1, speech=False), "0:01") self.assertEqual(nice_duration(1, resolution=TimeResolution.MINUTES), "under a minute") self.assertEqual(nice_duration(61), "one minute one second") self.assertEqual(nice_duration(61, speech=False), "1:01") self.assertEqual(nice_duration(3600), "one hour") self.assertEqual(nice_duration(3600, speech=False), "1h") self.assertEqual(nice_duration(3660, speech=False), "1:01:00") self.assertEqual(nice_duration(3607, speech=False), "1:00:07") self.assertEqual(nice_duration(36000, speech=False), "10h") self.assertEqual(nice_duration(5000), "one hour twenty three minutes and twenty seconds") self.assertEqual(nice_duration(5000, speech=False), "1:23:20") self.assertEqual(nice_duration(50000), "thirteen hours fifty three minutes and twenty seconds") # nopep8 self.assertEqual(nice_duration(50000, resolution=TimeResolution.MINUTES), "thirteen hours fifty three minutes") self.assertEqual(nice_duration(50000, resolution=TimeResolution.HOURS), "thirteen hours") self.assertEqual(nice_duration(50000, speech=False), "13:53:20") self.assertEqual(nice_duration(500000), "five 
days eighteen hours fifty three minutes and twenty seconds") # nopep8 self.assertEqual(nice_duration(500000, speech=False), "5d 18:53:20") self.assertEqual(nice_duration(datetime.timedelta(seconds=500000), speech=False), "5d 18:53:20") self.assertEqual(nice_duration(1.250575, resolution=TimeResolution.MILLISECONDS), "one point two five seconds") self.assertEqual(nice_duration(0.25, resolution=TimeResolution.MILLISECONDS), "zero point two five seconds") self.assertEqual( nice_duration(0.25, speech=False, resolution=TimeResolution.MILLISECONDS), "0:00.250") self.assertEqual( nice_duration(0.2, speech=False, resolution=TimeResolution.MILLISECONDS), "0:00.200") self.assertEqual(nice_duration(360000.254, resolution=TimeResolution.SECONDS, speech=False), "4d 4h") self.assertEqual(nice_duration(360000.254325, resolution=TimeResolution.MILLISECONDS, speech=False), "4d 4:00:00.254") self.assertEqual(nice_duration(360365.254, resolution=TimeResolution.MILLISECONDS, speech=False), "4d 4:06:05.254") self.assertEqual(nice_duration(0), "zero seconds") self.assertEqual(nice_duration(0, speech=False), "0:00") self.assertEqual(nice_duration(0, resolution=TimeResolution.MINUTES), "zero minutes") self.assertEqual(nice_duration(30, resolution=TimeResolution.MINUTES), "under a minute") # test clock output self.assertEqual(nice_duration(60, resolution=TimeResolution.HOURS, clock=True, speech=False), "0:01:00") self.assertEqual(nice_duration(1, resolution=TimeResolution.MINUTES, clock=True, speech=False), "0:01") self.assertEqual(nice_duration(0.25, resolution=TimeResolution.HOURS, clock=True, speech=False), "0:00:00") self.assertEqual(nice_duration(0.25, resolution=TimeResolution.MINUTES, clock=True, speech=False), "0:00") self.assertEqual(nice_duration(0.25, clock=True, speech=False), "0:00") self.assertEqual(nice_duration(0.25, resolution=TimeResolution.MILLISECONDS, clock=True, speech=False), "0:00.250") self.assertEqual(nice_duration(60, resolution=TimeResolution.YEARS, clock=True, 
speech=False), "0y") def test_nice_duration_dt(self): with pytest.raises(Exception): nice_duration_dt(123.45, "foo") with pytest.warns(UserWarning): nice_duration_dt(123, 456) self.assertEqual( nice_duration_dt(datetime.datetime(2019, 12, 25, 20, 30), date2=datetime.datetime(2019, 10, 31, 8, 00), # nopep8 speech=False), "55d 12h 30m") self.assertEqual(nice_duration_dt( datetime.datetime(2019, 1, 1), date2=datetime.datetime(2018, 1, 1)), "one year") self.assertEqual(nice_duration_dt( datetime.datetime(2019, 1, 1), date2=datetime.datetime(2018, 1, 1), speech=False), "1y") self.assertEqual(nice_duration_dt( datetime.datetime(2019, 1, 1), date2=datetime.datetime(2018, 1, 1), use_years=False), "three hundred and sixty five days") self.assertEqual(nice_duration_dt( datetime.datetime(2019, 1, 2), date2=datetime.datetime(2018, 1, 1)), "one year one day") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1)), "zero seconds") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), speech=False), "0:00") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.MINUTES), "zero minutes") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.MINUTES, speech=False), "0m") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.HOURS), "zero hours") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.HOURS, speech=False), "0h") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.DAYS), "zero days") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.DAYS, speech=False), "0d") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), 
datetime.datetime(1, 1, 1), resolution=TimeResolution.YEARS), "zero years") self.assertEqual(nice_duration_dt(datetime.datetime(1, 1, 1), datetime.datetime(1, 1, 1), resolution=TimeResolution.YEARS, speech=False), "0y") class TestErrorHandling(unittest.TestCase): @unittest.skip("Put back when Lingua Franca deprecates " "'lang=None' and 'lang=Invalid'") def test_invalid_lang_code(self): dt = datetime.datetime(2018, 2, 4, 0, 2, 3) with self.assertRaises(UnsupportedLanguageError): nice_date(dt, lang='invalid', now=dt) if __name__ == "__main__": unittest.main()
from coapthon import defines
from coapthon.messages.message import Message
from coapthon.messages.option import Option

__author__ = 'Giacomo Tanganelli'


class Request(Message):
    """
    A CoAP Request message.

    Extends :class:`Message` with convenience properties for the
    request-specific options (Uri-Path, Uri-Query, Accept, If-Match,
    If-None-Match, Proxy-Uri, Proxy-Scheme).

    NOTE(fix): the original file defined the ``uri_query`` property twice;
    the second definition silently shadowed the first and broke multi-query
    handling (it returned only the first Uri-Query option and appended whole
    query strings as a single option).  The duplicate has been removed so the
    correct one-option-per-``&``-separated-parameter implementation, as
    required by RFC 7252 section 6.5, is in effect.
    """

    def __init__(self):
        """
        Initialize a Request message.
        """
        super(Request, self).__init__()

    @property
    def uri_path(self):
        """
        Return the Uri-Path of a request.

        Joins every URI_PATH option value with ``/`` (no leading or
        trailing slash).

        :rtype : String
        :return: the Uri-Path
        """
        value = []
        for option in self.options:
            if option.number == defines.OptionRegistry.URI_PATH.number:
                value.append(str(option.value) + '/')
        value = "".join(value)
        # Drop the trailing '/' added by the loop (harmless on "").
        value = value[:-1]
        return value

    @uri_path.setter
    def uri_path(self, path):
        """
        Set the Uri-Path of a request.

        One URI_PATH option is added per ``/``-separated segment.  If the
        path carries a query string (``?``), the query part is delegated to
        the ``uri_query`` setter.

        :param path: the Uri-Path
        """
        path = path.strip("/")
        tmp = path.split("?")
        path = tmp[0]
        paths = path.split("/")
        for p in paths:
            option = Option()
            option.number = defines.OptionRegistry.URI_PATH.number
            option.value = p
            self.add_option(option)
        if len(tmp) > 1:
            query = tmp[1]
            self.uri_query = query

    @uri_path.deleter
    def uri_path(self):
        """
        Delete the Uri-Path of a request.
        """
        self.del_option_by_number(defines.OptionRegistry.URI_PATH.number)

    @property
    def uri_query(self):
        """
        Get the Uri-Query of a request.

        Joins every URI_QUERY option value with ``&``.

        :rtype : String
        :return: the Uri-Query string ("" if no query options are present)
        """
        value = []
        for option in self.options:
            if option.number == defines.OptionRegistry.URI_QUERY.number:
                value.append(str(option.value))
        return "&".join(value)

    @uri_query.setter
    def uri_query(self, value):
        """
        Set the Uri-Query of a request.

        Any existing URI_QUERY options are removed first, then one option is
        added per ``&``-separated parameter.

        :param value: the query string
        """
        # Clear previous query options so repeated assignment replaces
        # rather than accumulates.
        del self.uri_query
        queries = value.split("&")
        for q in queries:
            option = Option()
            option.number = defines.OptionRegistry.URI_QUERY.number
            option.value = str(q)
            self.add_option(option)

    @uri_query.deleter
    def uri_query(self):
        """
        Delete the Uri-Query options of a request.
        """
        self.del_option_by_number(defines.OptionRegistry.URI_QUERY.number)

    @property
    def accept(self):
        """
        Get the Accept option of a request.

        :return: the Accept value or None if not specified by the request
        :rtype : String
        """
        for option in self.options:
            if option.number == defines.OptionRegistry.ACCEPT.number:
                return option.value
        return None

    @accept.setter
    def accept(self, value):
        """
        Add an Accept option to a request.

        Values not present in ``defines.Content_types`` are silently
        ignored (deliberate best-effort behavior, kept as-is).

        :param value: the Accept value
        """
        if value in defines.Content_types.values():
            option = Option()
            option.number = defines.OptionRegistry.ACCEPT.number
            option.value = value
            self.add_option(option)

    @accept.deleter
    def accept(self):
        """
        Delete the Accept options of a request.
        """
        self.del_option_by_number(defines.OptionRegistry.ACCEPT.number)

    @property
    def if_match(self):
        """
        Get the If-Match option of a request.

        :return: the If-Match values or [] if not specified by the request
        :rtype : list
        """
        value = []
        for option in self.options:
            if option.number == defines.OptionRegistry.IF_MATCH.number:
                value.append(option.value)
        return value

    @if_match.setter
    def if_match(self, values):
        """
        Set the If-Match option of a request.

        :param values: the If-Match values
        :type values : list
        """
        assert isinstance(values, list)
        for v in values:
            option = Option()
            option.number = defines.OptionRegistry.IF_MATCH.number
            option.value = v
            self.add_option(option)

    @if_match.deleter
    def if_match(self):
        """
        Delete the If-Match options of a request.
        """
        self.del_option_by_number(defines.OptionRegistry.IF_MATCH.number)

    @property
    def if_none_match(self):
        """
        Get the if-none-match option of a request.

        :return: True, if if-none-match is present
        :rtype : bool
        """
        for option in self.options:
            if option.number == defines.OptionRegistry.IF_NONE_MATCH.number:
                return True
        return False

    def add_if_none_match(self):
        """
        Add the if-none-match option to the request.

        If-None-Match carries no value (it is an empty option).
        """
        option = Option()
        option.number = defines.OptionRegistry.IF_NONE_MATCH.number
        option.value = None
        self.add_option(option)

    @if_none_match.deleter
    def if_none_match(self):
        """
        Delete the if-none-match option in the request.
        """
        self.del_option_by_number(defines.OptionRegistry.IF_NONE_MATCH.number)

    @property
    def proxy_uri(self):
        """
        Get the Proxy-Uri option of a request.

        :return: the Proxy-Uri values or None if not specified by the request
        :rtype : String
        """
        for option in self.options:
            if option.number == defines.OptionRegistry.PROXY_URI.number:
                return option.value
        return None

    @proxy_uri.setter
    def proxy_uri(self, value):
        """
        Set the Proxy-Uri option of a request.

        :param value: the Proxy-Uri value
        """
        option = Option()
        option.number = defines.OptionRegistry.PROXY_URI.number
        option.value = str(value)
        self.add_option(option)

    @proxy_uri.deleter
    def proxy_uri(self):
        """
        Delete the Proxy-Uri option of a request.
        """
        self.del_option_by_number(defines.OptionRegistry.PROXY_URI.number)

    @property
    def proxy_schema(self):
        """
        Get the Proxy-Schema option of a request.

        (Name kept as ``proxy_schema`` for backward compatibility even
        though the option registry entry is PROXY_SCHEME.)

        :return: the Proxy-Schema values or None if not specified by the request
        :rtype : String
        """
        for option in self.options:
            if option.number == defines.OptionRegistry.PROXY_SCHEME.number:
                return option.value
        return None

    @proxy_schema.setter
    def proxy_schema(self, value):
        """
        Set the Proxy-Schema option of a request.

        :param value: the Proxy-Schema value
        """
        option = Option()
        option.number = defines.OptionRegistry.PROXY_SCHEME.number
        option.value = str(value)
        self.add_option(option)

    @proxy_schema.deleter
    def proxy_schema(self):
        """
        Delete the Proxy-Schema option of a request.
        """
        self.del_option_by_number(defines.OptionRegistry.PROXY_SCHEME.number)
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest2 class Test_ACLEntity(unittest2.TestCase): def _getTargetClass(self): from gcloud.storage.acl import _ACLEntity return _ACLEntity def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor_default_identifier(self): TYPE = 'type' entity = self._makeOne(TYPE) self.assertEqual(entity.type, TYPE) self.assertEqual(entity.identifier, None) self.assertEqual(entity.get_roles(), set()) def test_ctor_w_identifier(self): TYPE = 'type' ID = 'id' entity = self._makeOne(TYPE, ID) self.assertEqual(entity.type, TYPE) self.assertEqual(entity.identifier, ID) self.assertEqual(entity.get_roles(), set()) def test___str__no_identifier(self): TYPE = 'type' entity = self._makeOne(TYPE) self.assertEqual(str(entity), TYPE) def test___str__w_identifier(self): TYPE = 'type' ID = 'id' entity = self._makeOne(TYPE, ID) self.assertEqual(str(entity), '%s-%s' % (TYPE, ID)) def test_grant_simple(self): TYPE = 'type' ROLE = 'role' entity = self._makeOne(TYPE) entity.grant(ROLE) self.assertEqual(entity.get_roles(), set([ROLE])) def test_grant_duplicate(self): TYPE = 'type' ROLE1 = 'role1' ROLE2 = 'role2' entity = self._makeOne(TYPE) entity.grant(ROLE1) entity.grant(ROLE2) entity.grant(ROLE1) self.assertEqual(entity.get_roles(), set([ROLE1, ROLE2])) def test_revoke_miss(self): TYPE = 'type' ROLE = 'nonesuch' entity = self._makeOne(TYPE) entity.revoke(ROLE) 
self.assertEqual(entity.get_roles(), set()) def test_revoke_hit(self): TYPE = 'type' ROLE1 = 'role1' ROLE2 = 'role2' entity = self._makeOne(TYPE) entity.grant(ROLE1) entity.grant(ROLE2) entity.revoke(ROLE1) self.assertEqual(entity.get_roles(), set([ROLE2])) def test_grant_read(self): TYPE = 'type' entity = self._makeOne(TYPE) entity.grant_read() self.assertEqual(entity.get_roles(), set([entity.READER_ROLE])) def test_grant_write(self): TYPE = 'type' entity = self._makeOne(TYPE) entity.grant_write() self.assertEqual(entity.get_roles(), set([entity.WRITER_ROLE])) def test_grant_owner(self): TYPE = 'type' entity = self._makeOne(TYPE) entity.grant_owner() self.assertEqual(entity.get_roles(), set([entity.OWNER_ROLE])) def test_revoke_read(self): TYPE = 'type' entity = self._makeOne(TYPE) entity.grant(entity.READER_ROLE) entity.revoke_read() self.assertEqual(entity.get_roles(), set()) def test_revoke_write(self): TYPE = 'type' entity = self._makeOne(TYPE) entity.grant(entity.WRITER_ROLE) entity.revoke_write() self.assertEqual(entity.get_roles(), set()) def test_revoke_owner(self): TYPE = 'type' entity = self._makeOne(TYPE) entity.grant(entity.OWNER_ROLE) entity.revoke_owner() self.assertEqual(entity.get_roles(), set()) class Test_ACL(unittest2.TestCase): def _getTargetClass(self): from gcloud.storage.acl import ACL return ACL def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor(self): acl = self._makeOne() self.assertEqual(acl.entities, {}) self.assertFalse(acl.loaded) def test__ensure_loaded(self): acl = self._makeOne() def _reload(): acl._really_loaded = True acl.reload = _reload acl._ensure_loaded() self.assertTrue(acl._really_loaded) def test_client_is_abstract(self): acl = self._makeOne() self.assertRaises(NotImplementedError, lambda: acl.client) def test_reset(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True acl.entity(TYPE, ID) acl.reset() self.assertEqual(acl.entities, {}) self.assertFalse(acl.loaded) def 
test___iter___empty_eager(self): acl = self._makeOne() acl.loaded = True self.assertEqual(list(acl), []) def test___iter___empty_lazy(self): acl = self._makeOne() def _reload(): acl.loaded = True acl.reload = _reload self.assertEqual(list(acl), []) self.assertTrue(acl.loaded) def test___iter___non_empty_no_roles(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True acl.entity(TYPE, ID) self.assertEqual(list(acl), []) def test___iter___non_empty_w_roles(self): TYPE = 'type' ID = 'id' ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.entity(TYPE, ID) entity.grant(ROLE) self.assertEqual(list(acl), [{'entity': '%s-%s' % (TYPE, ID), 'role': ROLE}]) def test___iter___non_empty_w_empty_role(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True entity = acl.entity(TYPE, ID) entity.grant('') self.assertEqual(list(acl), []) def test_entity_from_dict_allUsers_eager(self): ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.entity_from_dict({'entity': 'allUsers', 'role': ROLE}) self.assertEqual(entity.type, 'allUsers') self.assertEqual(entity.identifier, None) self.assertEqual(entity.get_roles(), set([ROLE])) self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_entity_from_dict_allAuthenticatedUsers(self): ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.entity_from_dict({'entity': 'allAuthenticatedUsers', 'role': ROLE}) self.assertEqual(entity.type, 'allAuthenticatedUsers') self.assertEqual(entity.identifier, None) self.assertEqual(entity.get_roles(), set([ROLE])) self.assertEqual(list(acl), [{'entity': 'allAuthenticatedUsers', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_entity_from_dict_string_w_hyphen(self): ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.entity_from_dict({'entity': 'type-id', 'role': ROLE}) self.assertEqual(entity.type, 'type') 
self.assertEqual(entity.identifier, 'id') self.assertEqual(entity.get_roles(), set([ROLE])) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_entity_from_dict_string_wo_hyphen(self): ROLE = 'role' acl = self._makeOne() acl.loaded = True self.assertRaises(ValueError, acl.entity_from_dict, {'entity': 'bogus', 'role': ROLE}) self.assertEqual(list(acl.get_entities()), []) def test_has_entity_miss_str_eager(self): acl = self._makeOne() acl.loaded = True self.assertFalse(acl.has_entity('nonesuch')) def test_has_entity_miss_str_lazy(self): acl = self._makeOne() def _reload(): acl.loaded = True acl.reload = _reload self.assertFalse(acl.has_entity('nonesuch')) self.assertTrue(acl.loaded) def test_has_entity_miss_entity(self): from gcloud.storage.acl import _ACLEntity TYPE = 'type' ID = 'id' entity = _ACLEntity(TYPE, ID) acl = self._makeOne() acl.loaded = True self.assertFalse(acl.has_entity(entity)) def test_has_entity_hit_str(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True acl.entity(TYPE, ID) self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID))) def test_has_entity_hit_entity(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True entity = acl.entity(TYPE, ID) self.assertTrue(acl.has_entity(entity)) def test_get_entity_miss_str_no_default_eager(self): acl = self._makeOne() acl.loaded = True self.assertEqual(acl.get_entity('nonesuch'), None) def test_get_entity_miss_str_no_default_lazy(self): acl = self._makeOne() def _reload(): acl.loaded = True acl.reload = _reload self.assertEqual(acl.get_entity('nonesuch'), None) self.assertTrue(acl.loaded) def test_get_entity_miss_entity_no_default(self): from gcloud.storage.acl import _ACLEntity TYPE = 'type' ID = 'id' entity = _ACLEntity(TYPE, ID) acl = self._makeOne() acl.loaded = True self.assertEqual(acl.get_entity(entity), None) def test_get_entity_miss_str_w_default(self): DEFAULT = object() acl = self._makeOne() 
acl.loaded = True self.assertTrue(acl.get_entity('nonesuch', DEFAULT) is DEFAULT) def test_get_entity_miss_entity_w_default(self): from gcloud.storage.acl import _ACLEntity DEFAULT = object() TYPE = 'type' ID = 'id' entity = _ACLEntity(TYPE, ID) acl = self._makeOne() acl.loaded = True self.assertTrue(acl.get_entity(entity, DEFAULT) is DEFAULT) def test_get_entity_hit_str(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True acl.entity(TYPE, ID) self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID))) def test_get_entity_hit_entity(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True entity = acl.entity(TYPE, ID) self.assertTrue(acl.has_entity(entity)) def test_add_entity_miss_eager(self): from gcloud.storage.acl import _ACLEntity TYPE = 'type' ID = 'id' ROLE = 'role' entity = _ACLEntity(TYPE, ID) entity.grant(ROLE) acl = self._makeOne() acl.loaded = True acl.add_entity(entity) self.assertTrue(acl.loaded) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_add_entity_miss_lazy(self): from gcloud.storage.acl import _ACLEntity TYPE = 'type' ID = 'id' ROLE = 'role' entity = _ACLEntity(TYPE, ID) entity.grant(ROLE) acl = self._makeOne() def _reload(): acl.loaded = True acl.reload = _reload acl.add_entity(entity) self.assertTrue(acl.loaded) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) self.assertTrue(acl.loaded) def test_add_entity_hit(self): from gcloud.storage.acl import _ACLEntity TYPE = 'type' ID = 'id' ENTITY_VAL = '%s-%s' % (TYPE, ID) ROLE = 'role' entity = _ACLEntity(TYPE, ID) entity.grant(ROLE) acl = self._makeOne() acl.loaded = True before = acl.entity(TYPE, ID) acl.add_entity(entity) self.assertTrue(acl.loaded) self.assertFalse(acl.get_entity(ENTITY_VAL) is before) self.assertTrue(acl.get_entity(ENTITY_VAL) is entity) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': 
ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_entity_miss(self): TYPE = 'type' ID = 'id' ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.entity(TYPE, ID) self.assertTrue(acl.loaded) entity.grant(ROLE) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_entity_hit(self): TYPE = 'type' ID = 'id' ROLE = 'role' acl = self._makeOne() acl.loaded = True before = acl.entity(TYPE, ID) before.grant(ROLE) entity = acl.entity(TYPE, ID) self.assertTrue(entity is before) self.assertEqual(list(acl), [{'entity': 'type-id', 'role': ROLE}]) self.assertEqual(list(acl.get_entities()), [entity]) def test_user(self): ID = 'id' ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.user(ID) entity.grant(ROLE) self.assertEqual(entity.type, 'user') self.assertEqual(entity.identifier, ID) self.assertEqual(list(acl), [{'entity': 'user-%s' % ID, 'role': ROLE}]) def test_group(self): ID = 'id' ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.group(ID) entity.grant(ROLE) self.assertEqual(entity.type, 'group') self.assertEqual(entity.identifier, ID) self.assertEqual(list(acl), [{'entity': 'group-%s' % ID, 'role': ROLE}]) def test_domain(self): ID = 'id' ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.domain(ID) entity.grant(ROLE) self.assertEqual(entity.type, 'domain') self.assertEqual(entity.identifier, ID) self.assertEqual(list(acl), [{'entity': 'domain-%s' % ID, 'role': ROLE}]) def test_all(self): ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.all() entity.grant(ROLE) self.assertEqual(entity.type, 'allUsers') self.assertEqual(entity.identifier, None) self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}]) def test_all_authenticated(self): ROLE = 'role' acl = self._makeOne() acl.loaded = True entity = acl.all_authenticated() entity.grant(ROLE) self.assertEqual(entity.type, 
'allAuthenticatedUsers') self.assertEqual(entity.identifier, None) self.assertEqual(list(acl), [{'entity': 'allAuthenticatedUsers', 'role': ROLE}]) def test_get_entities_empty_eager(self): acl = self._makeOne() acl.loaded = True self.assertEqual(acl.get_entities(), []) def test_get_entities_empty_lazy(self): acl = self._makeOne() def _reload(): acl.loaded = True acl.reload = _reload self.assertEqual(acl.get_entities(), []) self.assertTrue(acl.loaded) def test_get_entities_nonempty(self): TYPE = 'type' ID = 'id' acl = self._makeOne() acl.loaded = True entity = acl.entity(TYPE, ID) self.assertEqual(acl.get_entities(), [entity]) def test_reload_missing(self): # https://github.com/GoogleCloudPlatform/gcloud-python/issues/652 ROLE = 'role' connection = _Connection({}) client = _Client(connection) acl = self._makeOne() acl.reload_path = '/testing/acl' acl.loaded = True acl.entity('allUsers', ROLE) acl.reload(client=client) self.assertEqual(list(acl), []) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'GET') self.assertEqual(kw[0]['path'], '/testing/acl') def test_reload_empty_result_clears_local(self): ROLE = 'role' connection = _Connection({'items': []}) client = _Client(connection) acl = self._makeOne() acl.reload_path = '/testing/acl' acl.loaded = True acl.entity('allUsers', ROLE) acl.reload(client=client) self.assertTrue(acl.loaded) self.assertEqual(list(acl), []) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'GET') self.assertEqual(kw[0]['path'], '/testing/acl') def test_reload_nonempty_result(self): ROLE = 'role' connection = _Connection( {'items': [{'entity': 'allUsers', 'role': ROLE}]}) client = _Client(connection) acl = self._makeOne() acl.reload_path = '/testing/acl' acl.loaded = True acl.reload(client=client) self.assertTrue(acl.loaded) self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}]) kw = connection._requested self.assertEqual(len(kw), 1) 
self.assertEqual(kw[0]['method'], 'GET') self.assertEqual(kw[0]['path'], '/testing/acl') def test_save_none_set_none_passed(self): connection = _Connection() client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.save(client=client) kw = connection._requested self.assertEqual(len(kw), 0) def test_save_existing_missing_none_passed(self): connection = _Connection({}) client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True acl.save(client=client) self.assertEqual(list(acl), []) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/testing') self.assertEqual(kw[0]['data'], {'acl': []}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_save_no_acl(self): ROLE = 'role' AFTER = [{'entity': 'allUsers', 'role': ROLE}] connection = _Connection({'acl': AFTER}) client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True acl.entity('allUsers').grant(ROLE) acl.save(client=client) self.assertEqual(list(acl), AFTER) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/testing') self.assertEqual(kw[0]['data'], {'acl': AFTER}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_save_w_acl(self): ROLE1 = 'role1' ROLE2 = 'role2' STICKY = {'entity': 'allUsers', 'role': ROLE2} new_acl = [{'entity': 'allUsers', 'role': ROLE1}] connection = _Connection({'acl': [STICKY] + new_acl}) client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True acl.save(new_acl, client=client) entries = list(acl) self.assertEqual(len(entries), 2) self.assertTrue(STICKY in entries) self.assertTrue(new_acl[0] in entries) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/testing') 
self.assertEqual(kw[0]['data'], {'acl': new_acl}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) def test_save_prefefined_invalid(self): connection = _Connection() client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True with self.assertRaises(ValueError): acl.save_predefined('bogus', client=client) def test_save_predefined_valid(self): PREDEFINED = 'private' connection = _Connection({'acl': []}) client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True acl.save_predefined(PREDEFINED, client=client) entries = list(acl) self.assertEqual(len(entries), 0) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/testing') self.assertEqual(kw[0]['data'], {'acl': []}) self.assertEqual(kw[0]['query_params'], {'projection': 'full', 'predefinedAcl': PREDEFINED}) def test_save_predefined_valid_w_alternate_query_param(self): # Cover case where subclass overrides _PREDEFINED_QUERY_PARAM PREDEFINED = 'private' connection = _Connection({'acl': []}) client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True acl._PREDEFINED_QUERY_PARAM = 'alternate' acl.save_predefined(PREDEFINED, client=client) entries = list(acl) self.assertEqual(len(entries), 0) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/testing') self.assertEqual(kw[0]['data'], {'acl': []}) self.assertEqual(kw[0]['query_params'], {'projection': 'full', 'alternate': PREDEFINED}) def test_clear(self): ROLE1 = 'role1' ROLE2 = 'role2' STICKY = {'entity': 'allUsers', 'role': ROLE2} connection = _Connection({'acl': [STICKY]}) client = _Client(connection) acl = self._makeOne() acl.save_path = '/testing' acl.loaded = True acl.entity('allUsers', ROLE1) acl.clear(client=client) self.assertEqual(list(acl), [STICKY]) kw = connection._requested 
self.assertEqual(len(kw), 1) self.assertEqual(kw[0]['method'], 'PATCH') self.assertEqual(kw[0]['path'], '/testing') self.assertEqual(kw[0]['data'], {'acl': []}) self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) class Test_BucketACL(unittest2.TestCase): def _getTargetClass(self): from gcloud.storage.acl import BucketACL return BucketACL def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor(self): NAME = 'name' bucket = _Bucket(NAME) acl = self._makeOne(bucket) self.assertEqual(acl.entities, {}) self.assertFalse(acl.loaded) self.assertTrue(acl.bucket is bucket) self.assertEqual(acl.reload_path, '/b/%s/acl' % NAME) self.assertEqual(acl.save_path, '/b/%s' % NAME) class Test_DefaultObjectACL(unittest2.TestCase): def _getTargetClass(self): from gcloud.storage.acl import DefaultObjectACL return DefaultObjectACL def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor(self): NAME = 'name' bucket = _Bucket(NAME) acl = self._makeOne(bucket) self.assertEqual(acl.entities, {}) self.assertFalse(acl.loaded) self.assertTrue(acl.bucket is bucket) self.assertEqual(acl.reload_path, '/b/%s/defaultObjectAcl' % NAME) self.assertEqual(acl.save_path, '/b/%s' % NAME) class Test_ObjectACL(unittest2.TestCase): def _getTargetClass(self): from gcloud.storage.acl import ObjectACL return ObjectACL def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_ctor(self): NAME = 'name' BLOB_NAME = 'blob-name' bucket = _Bucket(NAME) blob = _Blob(bucket, BLOB_NAME) acl = self._makeOne(blob) self.assertEqual(acl.entities, {}) self.assertFalse(acl.loaded) self.assertTrue(acl.blob is blob) self.assertEqual(acl.reload_path, '/b/%s/o/%s/acl' % (NAME, BLOB_NAME)) self.assertEqual(acl.save_path, '/b/%s/o/%s' % (NAME, BLOB_NAME)) class _Blob(object): def __init__(self, bucket, blob): self.bucket = bucket self.blob = blob @property def path(self): return '%s/o/%s' % (self.bucket.path, self.blob) class 
_Bucket(object): def __init__(self, name): self.name = name @property def path(self): return '/b/%s' % self.name class _Connection(object): _delete_ok = False def __init__(self, *responses): self._responses = responses self._requested = [] self._deleted = [] def api_request(self, **kw): from gcloud.exceptions import NotFound self._requested.append(kw) try: response, self._responses = self._responses[0], self._responses[1:] except: # pragma: NO COVER raise NotFound('miss') else: return response class _Client(object): def __init__(self, connection): self.connection = connection
'''
Copyright (c) 2011-2015, Agora Games, LLC

All rights reserved.

https://github.com/agoragames/haigha/blob/master/LICENSE.txt
'''

from struct import Struct
from datetime import datetime
from decimal import Decimal


class Reader(object):

    """
    A stream-like reader object that supports all the basic data types of
    AMQP.  Maintains a cursor (``self._pos``) into a read-only byte buffer
    and decodes AMQP wire-format fields at the cursor.

    NOTE: this is Python 2 code -- it relies on ``buffer``, ``unicode``,
    ``xrange`` and 1-char-string indexing of byte buffers.
    """

    class ReaderError(Exception):
        '''Base class for all reader errors.'''

    class BufferUnderflow(ReaderError):
        '''Not enough bytes to satisfy the request.'''

    class FieldError(ReaderError):
        '''Unsupported field type was read.'''

    def __init__(self, source, start_pos=0, size=None):
        """
        source should be a bytearray, io object with a read() method,
        another Reader, a plain or unicode string.

        Can be allocated over a slice of source (via ``start_pos``/``size``).
        """
        # Note: buffer used here because unpack_from can't accept an array,
        # which I think is related to http://bugs.python.org/issue7827
        if isinstance(source, bytearray):
            self._input = buffer(source)
        elif isinstance(source, Reader):
            # Shares the underlying buffer with the other Reader (no copy).
            self._input = source._input
        elif hasattr(source, 'read'):
            # File-like objects are drained eagerly into an in-memory buffer.
            self._input = buffer(source.read())
        elif isinstance(source, str):
            self._input = buffer(source)
        elif isinstance(source, unicode):
            self._input = buffer(source.encode('utf8'))
        else:
            raise ValueError(
                'Reader needs a bytearray, io object or plain string')

        self._start_pos = self._pos = start_pos
        self._end_pos = len(self._input)
        if size:
            self._end_pos = self._start_pos + size

    def __str__(self):
        # Hex dump of the visible slice, e.g. "\x00\x01...".
        return ''.join(['\\x%s' % (c.encode('hex'))
                        for c in self._input[self._start_pos:self._end_pos]])

    def tell(self):
        '''
        Current position
        '''
        return self._pos

    def seek(self, offset, whence=0):
        '''
        Simple seek. Follows standard interface (0=start, 1=relative,
        2=from end).
        '''
        if whence == 0:
            self._pos = self._start_pos + offset
        elif whence == 1:
            self._pos += offset
        else:
            self._pos = (self._end_pos - 1) + offset

    def _check_underflow(self, n):
        '''
        Raise BufferUnderflow if there's not enough bytes to satisfy
        the request.
        '''
        if self._pos + n > self._end_pos:
            raise self.BufferUnderflow()

    def __len__(self):
        '''
        Supports content framing in Channel (length of the visible slice,
        independent of the cursor).
        '''
        return self._end_pos - self._start_pos

    def buffer(self):
        '''
        Get a copy of the buffer that this is reading from. Returns a
        buffer object
        '''
        return buffer(self._input, self._start_pos,
                      (self._end_pos - self._start_pos))

    def read(self, n):
        """
        Read n bytes.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        """
        self._check_underflow(n)
        rval = self._input[self._pos:self._pos + n]
        self._pos += n
        return rval

    def read_bit(self):
        """
        Read a single boolean value, returns 0 or 1. Convience for single
        bit fields.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        """
        # Perform a faster check on underflow
        if self._pos >= self._end_pos:
            raise self.BufferUnderflow()
        # Only the least-significant bit of the byte is used.
        result = ord(self._input[self._pos]) & 1
        self._pos += 1
        return result

    def read_bits(self, num):
        '''
        Read several bits packed into the same field. Will return as a list.
        The bit field itself is little-endian, though the order of the
        returned array looks big-endian for ease of decomposition.

        Reader('\x02').read_bits(2) -> [False,True]
        Reader('\x08').read_bits(8) ->
            [False,False,False,True,False,False,False,False]
        first_field, second_field = Reader('\x02').read_bits(2)

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise ValueError if num < 0 or num > 8 (all bits come from a
        single byte).
        '''
        # Perform a faster check on underflow
        if self._pos >= self._end_pos:
            raise self.BufferUnderflow()
        if num < 0 or num >= 9:
            raise ValueError("8 bits per field")
        field = ord(self._input[self._pos])
        # Bit x of the byte becomes element x of the result list.
        result = map(lambda x: field >> x & 1, xrange(num))
        self._pos += 1
        return result

    def read_octet(self, unpacker=Struct('B').unpack_from,
                   size=Struct('B').size):
        """
        Read one byte, return as an integer

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed
        """
        # Technically should look at unpacker.size, but skipping that is way
        # faster and this method is the most-called of the readers
        if self._pos >= self._end_pos:
            raise self.BufferUnderflow()
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def read_short(self, unpacker=Struct('>H').unpack_from,
                   size=Struct('>H').size):
        """
        Read an unsigned 16-bit integer (network byte order).

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed
        """
        self._check_underflow(size)
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def read_long(self, unpacker=Struct('>I').unpack_from,
                  size=Struct('>I').size):
        """
        Read an unsigned 32-bit integer (network byte order).

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed
        """
        self._check_underflow(size)
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def read_longlong(self, unpacker=Struct('>Q').unpack_from,
                      size=Struct('>Q').size):
        """
        Read an unsigned 64-bit integer (network byte order).

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed
        """
        self._check_underflow(size)
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def read_shortstr(self):
        """
        Read a utf-8 encoded string that's stored in up to 255 bytes.

        NOTE(review): the raw bytes are returned as-is; no utf-8 decoding is
        actually performed here -- confirm callers decode if they need text.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise UnicodeDecodeError if the text is mal-formed.
        Will raise struct.error if the data is malformed
        """
        slen = self.read_octet()
        return self.read(slen)

    def read_longstr(self):
        """
        Read a string that's up to 2**32 bytes, the encoding isn't
        specified in the AMQP spec, so just return it as a plain Python
        string.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed
        """
        slen = self.read_long()
        return self.read(slen)

    def read_timestamp(self):
        """
        Read and AMQP timestamp, which is a 64-bit integer representing
        seconds since the Unix epoch in 1-second resolution.
        Return as a Python datetime.datetime object,
        expressed as UTC time.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed

        To support different RabbitMQ constructs, the timestamp is parsed in
        different basis from seconds to microseconds.
        """
        ts = self.read_longlong()
        try:
            # Try to parse the timestamp in seconds
            return datetime.utcfromtimestamp(ts)
        except ValueError:
            try:
                # Try to parse the timestamp in milliseconds
                return datetime.utcfromtimestamp(ts / 1e3)
            except ValueError:
                try:
                    # Try to parse the timestamp in microseconds
                    return datetime.utcfromtimestamp(ts / 1e6)
                except ValueError:
                    # Failed to parse the timestamp
                    raise

    def read_table(self):
        """
        Read an AMQP table, and return as a Python dictionary.
        Keys are short strings; values are decoded via ``_read_field``.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise UnicodeDecodeError if the text is mal-formed.
        Will raise struct.error if the data is malformed
        """
        # Only need to check underflow on the table once
        tlen = self.read_long()
        self._check_underflow(tlen)
        end_pos = self._pos + tlen
        result = {}
        while self._pos < end_pos:
            name = self._field_shortstr()
            result[name] = self._read_field()
        return result

    def _read_field(self):
        '''
        Read a single byte for field type, then read the value.
        '''
        ftype = self._input[self._pos]
        self._pos += 1
        reader = self.field_type_map.get(ftype)
        if reader:
            return reader(self)
        # NOTE(review): FieldError is raised with two positional args here,
        # so the '%s' placeholder is never interpolated -- likely intended
        # to be 'Unknown field type %s' % ftype.
        raise Reader.FieldError('Unknown field type %s', ftype)

    def _field_bool(self):
        result = ord(self._input[self._pos]) & 1
        self._pos += 1
        return result

    def _field_short_short_int(self, unpacker=Struct('b').unpack_from,
                               size=Struct('b').size):
        # Signed 8-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_short_short_uint(self, unpacker=Struct('B').unpack_from,
                                size=Struct('B').size):
        # Unsigned 8-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_short_int(self, unpacker=Struct('>h').unpack_from,
                         size=Struct('>h').size):
        # Signed 16-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_short_uint(self, unpacker=Struct('>H').unpack_from,
                          size=Struct('>H').size):
        # Unsigned 16-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_long_int(self, unpacker=Struct('>i').unpack_from,
                        size=Struct('>i').size):
        # Signed 32-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_long_uint(self, unpacker=Struct('>I').unpack_from,
                         size=Struct('>I').size):
        # Unsigned 32-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_long_long_int(self, unpacker=Struct('>q').unpack_from,
                             size=Struct('>q').size):
        # Signed 64-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_long_long_uint(self, unpacker=Struct('>Q').unpack_from,
                              size=Struct('>Q').size):
        # Unsigned 64-bit integer.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_float(self, unpacker=Struct('>f').unpack_from,
                     size=Struct('>f').size):
        # IEEE-754 single precision.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    def _field_double(self, unpacker=Struct('>d').unpack_from,
                      size=Struct('>d').size):
        # IEEE-754 double precision.
        rval = unpacker(self._input, self._pos)[0]
        self._pos += size
        return rval

    # Coding to http://dev.rabbitmq.com/wiki/Amqp091Errata#section_3 which
    # differs from spec in that the value is signed.
    def _field_decimal(self):
        # One byte of decimal places, then a signed 32-bit raw value.
        d = self._field_short_short_uint()
        n = self._field_long_int()
        return Decimal(n) / Decimal(10 ** d)

    def _field_shortstr(self):
        slen = self._field_short_short_uint()
        rval = self._input[self._pos:self._pos + slen]
        self._pos += slen
        return rval

    def _field_longstr(self):
        slen = self._field_long_uint()
        rval = self._input[self._pos:self._pos + slen]
        self._pos += slen
        return rval

    def _field_array(self):
        # Length-prefixed sequence of typed fields.
        alen = self.read_long()
        end_pos = self._pos + alen
        rval = []
        while self._pos < end_pos:
            rval.append(self._read_field())
        return rval

    def _field_timestamp(self):
        """
        Read and AMQP timestamp, which is a 64-bit integer representing
        seconds since the Unix epoch in 1-second resolution.
        Return as a Python datetime.datetime object,
        expressed as UTC time.

        Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise struct.error if the data is malformed
        """
        return datetime.utcfromtimestamp(self._field_long_long_uint())

    def _field_bytearray(self):
        slen = self._field_long_uint()
        rval = bytearray(self._input[self._pos:self._pos + slen])
        self._pos += slen
        return rval

    def _field_none(self):
        # AMQP "void" field.
        return None

    # A mapping for quick lookups
    # Rabbit and Qpid 0.9.1 mapping
    # Based on: http://www.rabbitmq.com/amqp-0-9-1-errata.html
    # (3. Field types)
    field_type_map = {
        't': _field_bool,
        'b': _field_short_short_int,
        's': _field_short_int,
        'I': _field_long_int,
        'l': _field_long_long_int,
        'f': _field_float,
        'd': _field_double,
        'D': _field_decimal,
        'S': _field_longstr,
        'A': _field_array,
        'T': _field_timestamp,
        'F': read_table,
        'V': _field_none,
        'x': _field_bytearray,
    }

    # 0.9.1 spec mapping
    # field_type_map = {
    #     't' : _field_bool,
    #     'b' : _field_short_short_int,
    #     'B' : _field_short_short_uint,
    #     'U' : _field_short_int,
    #     'u' : _field_short_uint,
    #     'I' : _field_long_int,
    #     'i' : _field_long_uint,
    #     'L' : _field_long_long_int,
    #     'l' : _field_long_long_uint,
    #     'f' : _field_float,
    #     'd' : _field_double,
    #     'D' : _field_decimal,
    #     's' : _field_shortstr,
    #     'S' : _field_longstr,
    #     'A' : _field_array,
    #     'T' : _field_timestamp,
    #     'F' : read_table,
    #     'V' : _field_none,
    # }
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Python
import datetime
import os
import urlparse

# Django
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str, smart_text
from django.utils.text import slugify
from django.core.exceptions import ValidationError
from django.utils.timezone import now, make_aware, get_default_timezone

# AWX
from awx.api.versioning import reverse
from awx.main.models.base import *  # noqa
from awx.main.models.notifications import (
    NotificationTemplate,
    JobNotificationMixin,
)
from awx.main.models.unified_jobs import *  # noqa
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin
from awx.main.utils import update_scm_url
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import (
    ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
    ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.fields import JSONField

__all__ = ['Project', 'ProjectUpdate']


class ProjectOptions(models.Model):
    # Abstract mixin holding the SCM-related fields and validation shared by
    # Project and ProjectUpdate.

    SCM_TYPE_CHOICES = [
        ('', _('Manual')),
        ('git', _('Git')),
        ('hg', _('Mercurial')),
        ('svn', _('Subversion')),
        ('insights', _('Red Hat Insights')),
    ]

    class Meta:
        abstract = True

    # Project files must be available on the server in folders directly
    # beneath the path specified by settings.PROJECTS_ROOT. There is no way
    # via the API to upload/update a project or its playbooks; this must be
    # done by other means for now.

    @classmethod
    def get_local_path_choices(cls):
        # List directories under PROJECTS_ROOT that are not hidden/internal
        # and not already claimed by an existing Project.
        if os.path.exists(settings.PROJECTS_ROOT):
            paths = [x.decode('utf-8') for x in os.listdir(settings.PROJECTS_ROOT)
                     if (os.path.isdir(os.path.join(settings.PROJECTS_ROOT, x)) and
                         not x.startswith('.') and
                         not x.startswith('_'))]
            qs = Project.objects
            used_paths = qs.values_list('local_path', flat=True)
            return [x for x in paths if x not in used_paths]
        else:
            return []

    local_path = models.CharField(
        max_length=1024,
        blank=True,
        help_text=_('Local path (relative to PROJECTS_ROOT) containing '
                    'playbooks and related files for this project.')
    )

    scm_type = models.CharField(
        max_length=8,
        choices=SCM_TYPE_CHOICES,
        blank=True,
        default='',
        verbose_name=_('SCM Type'),
        help_text=_("Specifies the source control system used to store the project."),
    )
    scm_url = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        verbose_name=_('SCM URL'),
        help_text=_("The location where the project is stored."),
    )
    scm_branch = models.CharField(
        max_length=256,
        blank=True,
        default='',
        verbose_name=_('SCM Branch'),
        help_text=_('Specific branch, tag or commit to checkout.'),
    )
    scm_clean = models.BooleanField(
        default=False,
        help_text=_('Discard any local changes before syncing the project.'),
    )
    scm_delete_on_update = models.BooleanField(
        default=False,
        help_text=_('Delete the project before syncing.'),
    )
    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    timeout = models.IntegerField(
        blank=True,
        default=0,
        help_text=_("The amount of time (in seconds) to run before the task is canceled."),
    )

    def clean_scm_type(self):
        # Normalize None/'' to ''.
        return self.scm_type or ''

    def clean_scm_url(self):
        # Validate (and for insights projects, force) the SCM URL.
        if self.scm_type == 'insights':
            self.scm_url = settings.INSIGHTS_URL_BASE
        scm_url = unicode(self.scm_url or '')
        if not self.scm_type:
            return ''
        try:
            scm_url = update_scm_url(self.scm_type, scm_url,
                                     check_special_cases=False)
        except ValueError as e:
            raise ValidationError((e.args or (_('Invalid SCM URL.'),))[0])
        scm_url_parts = urlparse.urlsplit(scm_url)
        if self.scm_type and not any(scm_url_parts):
            raise ValidationError(_('SCM URL is required.'))
        return unicode(self.scm_url or '')

    def clean_credential(self):
        # Enforce that the credential kind matches the SCM type, and that the
        # URL + credential combination is usable.
        if not self.scm_type:
            return None
        cred = self.credential
        if not cred and self.scm_type == 'insights':
            raise ValidationError(_("Insights Credential is required for an Insights Project."))
        elif cred:
            if self.scm_type == 'insights':
                if cred.kind != 'insights':
                    raise ValidationError(_("Credential kind must be 'insights'."))
            elif cred.kind != 'scm':
                raise ValidationError(_("Credential kind must be 'scm'."))
        try:
            if self.scm_type == 'insights':
                self.scm_url = settings.INSIGHTS_URL_BASE
            scm_url = update_scm_url(self.scm_type, self.scm_url,
                                     check_special_cases=False)
            scm_url_parts = urlparse.urlsplit(scm_url)
            # Prefer the username/password in the URL, if provided.
            scm_username = scm_url_parts.username or cred.username or ''
            if scm_url_parts.password or cred.password:
                scm_password = '********'
            else:
                scm_password = ''
            try:
                update_scm_url(self.scm_type, self.scm_url, scm_username,
                               scm_password)
            except ValueError as e:
                raise ValidationError((e.args or (_('Invalid credential.'),))[0])
        except ValueError:
            pass
        return cred

    def get_project_path(self, check_if_exists=True):
        # Absolute on-disk path for this project under PROJECTS_ROOT, or
        # None if local_path is empty/hidden (or missing, when checking).
        local_path = os.path.basename(self.local_path)
        if local_path and not local_path.startswith('.'):
            proj_path = os.path.join(settings.PROJECTS_ROOT, local_path)
            if not check_if_exists or os.path.exists(smart_str(proj_path)):
                return proj_path

    @property
    def playbooks(self):
        # Sorted, case-insensitive list of playbook files found by walking
        # the project directory.
        results = []
        project_path = self.get_project_path()
        if project_path:
            for dirpath, dirnames, filenames in os.walk(smart_str(project_path)):
                if skip_directory(dirpath):
                    continue
                for filename in filenames:
                    playbook = could_be_playbook(project_path, dirpath, filename)
                    if playbook is not None:
                        results.append(smart_text(playbook))
        return sorted(results, key=lambda x: smart_str(x).lower())

    @property
    def inventories(self):
        # Sorted, case-insensitive list of candidate inventory files, capped
        # to avoid huge listings.
        results = []
        project_path = self.get_project_path()
        if project_path:
            # Cap the number of results, because it could include lots
            max_inventory_listing = 50
            for dirpath, dirnames, filenames in os.walk(smart_str(project_path)):
                if skip_directory(dirpath):
                    continue
                for filename in filenames:
                    inv_path = could_be_inventory(project_path, dirpath, filename)
                    if inv_path is not None:
                        results.append(smart_text(inv_path))
                    if len(results) > max_inventory_listing:
                        break
                if len(results) > max_inventory_listing:
                    break
        return sorted(results, key=lambda x: smart_str(x).lower())

    def get_lock_file(self):
        '''
        We want the project path in name only, we don't care if it exists or
        not. This method will just append .lock onto the full directory path.
        '''
        proj_path = self.get_project_path(check_if_exists=False)
        if not proj_path:
            return None
        return proj_path + '.lock'


class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
    '''
    A project represents a playbook git repo that can access a set of
    inventories
    '''

    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]

    class Meta:
        app_label = 'main'
        ordering = ('id',)

    organization = models.ForeignKey(
        'Organization',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name='projects',
    )

    scm_delete_on_next_update = models.BooleanField(
        default=False,
        editable=False,
    )
    scm_update_on_launch = models.BooleanField(
        default=False,
        help_text=_('Update the project when a job is launched that uses the project.'),
    )
    scm_update_cache_timeout = models.PositiveIntegerField(
        default=0,
        blank=True,
        # NOTE(review): adjacent string literals below are missing a space
        # between "new" and "project" -- renders as "a newproject update".
        help_text=_('The number of seconds after the last project update ran that a new'
                    'project update will be launched as a job dependency.'),
    )
    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_('The last revision fetched by a project update'),
    )
    # NOTE(review): default=[] is a shared mutable default; Django convention
    # is default=list -- confirm JSONField handles this safely.
    playbook_files = JSONField(
        blank=True,
        default=[],
        editable=False,
        verbose_name=_('Playbook Files'),
        help_text=_('List of playbooks found in the project'),
    )
    inventory_files = JSONField(
        blank=True,
        default=[],
        editable=False,
        verbose_name=_('Inventory Files'),
        help_text=_('Suggested list of content that could be Ansible inventory in the project'),
    )

    # RBAC role fields; membership is implied by the listed parent roles.
    admin_role = ImplicitRoleField(parent_role=[
        'organization.admin_role',
        'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
    ])
    use_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    update_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    read_role = ImplicitRoleField(parent_role=[
        'organization.auditor_role',
        'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
        'use_role',
        'update_role',
    ])

    @classmethod
    def _get_unified_job_class(cls):
        return ProjectUpdate

    @classmethod
    def _get_unified_job_field_names(cls):
        # Fields copied from the template onto each spawned ProjectUpdate.
        return ['name', 'description', 'local_path', 'scm_type', 'scm_url',
                'scm_branch', 'scm_clean', 'scm_delete_on_update',
                'credential', 'schedule', 'timeout', 'launch_type',]

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        skip_update = bool(kwargs.pop('skip_update', False))
        # Check if scm_type or scm_url changes.
        if self.pk:
            project_before = self.__class__.objects.get(pk=self.pk)
            if project_before.scm_type != self.scm_type or project_before.scm_url != self.scm_url:
                # Force a clean checkout on the next update.
                self.scm_delete_on_next_update = True
                if 'scm_delete_on_next_update' not in update_fields:
                    update_fields.append('scm_delete_on_next_update')
        # Create auto-generated local path if project uses SCM.
        if self.pk and self.scm_type and not self.local_path.startswith('_'):
            slug_name = slugify(unicode(self.name)).replace(u'-', u'_')
            self.local_path = u'_%d__%s' % (int(self.pk), slug_name)
            if 'local_path' not in update_fields:
                update_fields.append('local_path')
        # Do the actual save.
        super(Project, self).save(*args, **kwargs)
        if new_instance:
            update_fields = []
            # Generate local_path for SCM after initial save (so we have a
            # PK).
            if self.scm_type and not self.local_path.startswith('_'):
                update_fields.append('local_path')
            if update_fields:
                from awx.main.signals import disable_activity_stream
                with disable_activity_stream():
                    self.save(update_fields=update_fields)
        # If we just created a new project with SCM, start the initial
        # update.
        if new_instance and self.scm_type and not skip_update:
            self.update()

    def _get_current_status(self):
        # Derive a display status from the current/last update job (SCM
        # projects) or from the existence of the local path (manual ones).
        if self.scm_type:
            if self.current_job and self.current_job.status:
                return self.current_job.status
            elif not self.last_job:
                return 'never updated'
            # inherit the child job status on failure
            elif self.last_job_failed:
                return self.last_job.status
            # Return the successful status
            else:
                return self.last_job.status
        elif not self.get_project_path():
            return 'missing'
        else:
            return 'ok'

    def _get_last_job_run(self):
        # Last update finish time for SCM projects; directory mtime for
        # manual projects.
        if self.scm_type and self.last_job:
            return self.last_job.finished
        else:
            project_path = self.get_project_path()
            if project_path:
                try:
                    mtime = os.path.getmtime(smart_str(project_path))
                    dt = datetime.datetime.fromtimestamp(mtime)
                    return make_aware(dt, get_default_timezone())
                except os.error:
                    pass

    def _can_update(self):
        # Only SCM-backed projects can be updated.
        return bool(self.scm_type)

    def _update_unified_job_kwargs(self, create_kwargs, kwargs):
        '''
        :param create_kwargs: key-worded arguments to be updated and later
                              used for creating unified job.
        :type create_kwargs: dict
        :param kwargs: request parameters used to override unified job
                       template fields with runtime values.
        :type kwargs: dict
        :return: modified create_kwargs.
        :rtype: dict
        '''
        if self.scm_delete_on_next_update:
            create_kwargs['scm_delete_on_update'] = True
        return create_kwargs

    def create_project_update(self, **kwargs):
        # Thin alias over the UnifiedJobTemplate factory.
        return self.create_unified_job(**kwargs)

    @property
    def cache_timeout_blocked(self):
        # True while the last update is still within the cache window.
        if not self.last_job_run:
            return False
        if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) > now():
            return True
        return False

    @property
    def needs_update_on_launch(self):
        # True when a dependent job launch should trigger a project update.
        if self.scm_type and self.scm_update_on_launch:
            if not self.last_job_run:
                return True
            if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) <= now():
                return True
        return False

    @property
    def notification_templates(self):
        # Combine templates attached directly to this project with those
        # inherited from its organization, de-duplicated per category.
        base_notification_templates = NotificationTemplate.objects
        error_notification_templates = list(base_notification_templates
                                            .filter(unifiedjobtemplate_notification_templates_for_errors=self))
        success_notification_templates = list(base_notification_templates
                                              .filter(unifiedjobtemplate_notification_templates_for_success=self))
        any_notification_templates = list(base_notification_templates
                                          .filter(unifiedjobtemplate_notification_templates_for_any=self))
        # Get Organization NotificationTemplates
        if self.organization is not None:
            error_notification_templates = set(error_notification_templates +
                                               list(base_notification_templates
                                                    .filter(organization_notification_templates_for_errors=self.organization)))
            success_notification_templates = set(success_notification_templates +
                                                 list(base_notification_templates
                                                      .filter(organization_notification_templates_for_success=self.organization)))
            any_notification_templates = set(any_notification_templates +
                                             list(base_notification_templates
                                                  .filter(organization_notification_templates_for_any=self.organization)))
        return dict(error=list(error_notification_templates),
                    success=list(success_notification_templates),
                    any=list(any_notification_templates))

    def get_absolute_url(self, request=None):
        return reverse('api:project_detail', kwargs={'pk': self.pk},
                       request=request)


class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin,
                    TaskManagerProjectUpdateMixin):
    '''
    Internal job for tracking project updates from SCM.
    '''

    class Meta:
        app_label = 'main'

    project = models.ForeignKey(
        'Project',
        related_name='project_updates',
        on_delete=models.CASCADE,
        editable=False,
    )
    job_type = models.CharField(
        max_length=64,
        choices=PROJECT_UPDATE_JOB_TYPE_CHOICES,
        default='check',
    )

    @classmethod
    def _get_parent_field_name(cls):
        return 'project'

    @classmethod
    def _get_task_class(cls):
        from awx.main.tasks import RunProjectUpdate
        return RunProjectUpdate

    def _global_timeout_setting(self):
        return 'DEFAULT_PROJECT_UPDATE_TIMEOUT'

    def is_blocked_by(self, obj):
        # Serialized against other updates of, and jobs using, this project.
        if type(obj) == ProjectUpdate:
            if self.project == obj.project:
                return True
        if type(obj) == Job:
            if self.project == obj.project:
                return True
        return False

    def websocket_emit_data(self):
        websocket_data = super(ProjectUpdate, self).websocket_emit_data()
        websocket_data.update(dict(project_id=self.project.id))
        return websocket_data

    @property
    def task_impact(self):
        # 'run'-type updates are treated as free; checks cost 20.
        return 0 if self.job_type == 'run' else 20

    @property
    def result_stdout(self):
        return self._result_stdout_raw(redact_sensitive=True,
                                       escape_ascii=True)

    @property
    def result_stdout_raw(self):
        return self._result_stdout_raw(redact_sensitive=True)

    def result_stdout_raw_limited(self, start_line=0, end_line=None,
                                  redact_sensitive=True):
        return self._result_stdout_raw_limited(
            start_line, end_line, redact_sensitive=redact_sensitive)

    def result_stdout_limited(self, start_line=0, end_line=None,
                              redact_sensitive=True):
        return self._result_stdout_raw_limited(
            start_line, end_line, redact_sensitive=redact_sensitive,
            escape_ascii=True)

    def get_absolute_url(self, request=None):
        return reverse('api:project_update_detail', kwargs={'pk': self.pk},
                       request=request)

    def get_ui_url(self):
        return urlparse.urljoin(settings.TOWER_URL_BASE,
                                "/#/scm_update/{}".format(self.pk))

    # NOTE: source excerpt is truncated mid-statement here; the remainder of
    # this method continues beyond the visible chunk.
    def _update_parent_instance(self):
        parent_instance =
self._get_parent_instance() if parent_instance and self.job_type == 'check': update_fields = self._update_parent_instance_no_save(parent_instance) if self.status in ('successful', 'failed', 'error', 'canceled'): if not self.failed and parent_instance.scm_delete_on_next_update: parent_instance.scm_delete_on_next_update = False if 'scm_delete_on_next_update' not in update_fields: update_fields.append('scm_delete_on_next_update') parent_instance.save(update_fields=update_fields) def cancel(self, job_explanation=None, is_chain=False): res = super(ProjectUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain) if res and self.launch_type != 'sync': for inv_src in self.scm_inventory_updates.filter(status='running'): inv_src.cancel(job_explanation='Source project update `{}` was canceled.'.format(self.name)) return res ''' JobNotificationMixin ''' def get_notification_templates(self): return self.project.notification_templates def get_notification_friendly_name(self): return "Project Update" @property def preferred_instance_groups(self): if self.project is not None and self.project.organization is not None: organization_groups = [x for x in self.project.organization.instance_groups.all()] else: organization_groups = [] template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups] selected_groups = template_groups + organization_groups if not selected_groups: return self.global_instance_groups return selected_groups
__author__ = 'x1ang.li' import pickle, re from os import path, makedirs from urllib.parse import urljoin from shutil import rmtree from zipfile import ZipFile import requests from bs4 import BeautifulSoup from .bookbase import BookBase cachefolder = path.expanduser(path.join('~', 'snapbook', 'cache')) outfolder = path.expanduser(path.join('~', 'snapbook', 'output')) cookiefile = path.join(cachefolder, 'cookie.dat') def ensure_dir(directory): if not path.exists(directory): makedirs(directory) def padstr(x): if len(x) < 2 and x.isdigit(): return '0' + x else: return x def pad_secid(x): if '.' not in x: return padstr(x) else: return '.'.join(map(padstr, x.split('.'))) def get_sec(secid, session): ''' download section. if the section has already been downloaded to the cache folder, just load it :param secid: the id of the section, for example "5.1" :param session: the requests session that contains cookie info :return: the content of the section ''' secfile = path.join(cachefolder, secid + '.htm') if path.exists(secfile): with open(secfile, 'r', encoding='utf8') as f: content = f.read() else: response = session.get('http://textflow.mheducation.com/parser.php?secload={}'.format(secid)) content = response.text.split('%', 1)[1] with open(secfile, 'w', encoding='utf8') as f: f.write(content) return content def dlrs(src, dest, session): ''' download resource :param src: the Internet url :param dest: the destination path :return: None ''' # filter irrelevant resources such as schweser.com src = urljoin('http://textflow.mheducation.com/parser.php', src) if (('mhhe') not in src) and (('mheducation') not in src): return # if 'figures' in src: # splitted = src.rsplit('.', 1) # src = splitted[0]+'_lg'+'.'+splitted[1] destfile = path.join(outfolder, dest) if path.exists(destfile): return # print('Downloading: ' + src) response = session.get(src, timeout=60) if response.status_code == requests.codes.ok: with open(destfile, 'wb') as f: f.write(response.content) else: print('ERROR: 
fail to download: ' + src) def has_id(tag): return tag.has_attr('id') def scan(dom, secid, idmap): print('scanning: ' + secid) soup = BeautifulSoup(dom, 'html5lib') # The tricky part here is: when an elem has multiple ids (which does not conform to HTML standard!) , # some parsers go w/ the first id, while others go w/ the last for elem in soup.find_all(has_id): id = elem['id'] idmap[id] = secid def transform(section, secid, idmap, session): ''' reansforms a signle page :param section: :return: ''' print('Transforming: ' + secid) soup = BeautifulSoup(section, 'html.parser') # <span class="highlight4_on" id="hl_25057787"> text </span> # text for elem in soup.select('span[class^="highlight"]'): elem.unwrap() # <a class="link" onclick="LoadSection('4', 'id_1259197786_001_003907');">Chapter 4</a> # <a class="link" href="4.htm#id_1259197786_001_003907" .... for elem in soup.select('a[onclick^="LoadSection"]'): onclk = elem['onclick'] groups = onclk.split('\'') del elem['onclick'] elem['href'] = groups[1] + '.htm#' + groups[3] # <div class="asset_watch" onclick="OpenLink('http://lectures.mhhe.com/connect/1259206475/powerpoints/Ch06-PPT.pptx^-1^-1^22'); .... # <div class="asset_watch" onclick="openlink('assets/Ch06-PPT.pptx'); .... # <div class="asset_link" onclick="OpenLink('assets/1259197786/spliker6475.pdf^-1^-1^0') # <div class="asset_link" onclick="openlink('assets/spliker6475.pdf) for elem in soup.select('div[onclick^="OpenLink"]'): onclk = elem['onclick'] p = re.compile(r"OpenLink[(]'(.*)'[)]") m = p.search(onclk) if m is None: print('ERROR: OpenLink onclk: ' + onclk) continue # else block: url = m.group(1) src = url.split('^', 1)[0] if 'player.html' in src: # for Narrated PowerPoint, we don't download it. 
dest = src else: dest = 'assets/' + src.rsplit('/',1)[1] dlrs(src, dest, session) elem['onclick'] = 'openlink(\'' + dest +'\'); ' # end of else # <a class="link" onclick="GotoID('id_1259197786_001_000188');">Example 1-3</a> # <a class="link" href="#id_1259197786_001_000188">Example 1-3</a> for elem in soup.select('a[onclick^="GotoID"]'): onclk = elem['onclick'] p = re.compile(r"GotoID[(]'(.*)'[)]") m = p.search(onclk) if m is None: print('ERROR GotoID onclk: ' + onclk) continue # else block: id = m.group(1) idsec = idmap[id] if id in idmap else '' if idsec is '': print('Warning: unable to find id ' + id) del elem['onclick'] elem['href'] = (idsec + '.htm' if idsec != secid else '' ) + '#' + id # end of else # <a class="link" onclick="FindFigure('id_1259197786_001_030456');">Exhibit 13-9</a> # <a class="link" href="#id_1259197786_001_030456">Exhibit 13-9</a> for elem in soup.select('a[onclick^="FindFigure"]'): onclk = elem['onclick'] p = re.compile(r"FindFigure[(]'(.*)'[)]") m = p.search(onclk) if m is None: print('ERROR: FindFigure onclk: ' + onclk) continue # else block: id = m.group(1) idsec = idmap[id] if id in idmap else '' if idsec is '': print('Warning: unable to find id ' + id) del elem['onclick'] elem['href'] = ((idsec+'.htm') if idsec != secid else '' ) + '#' + id # end of else # <img id="id_1259197786_001_000268c" src="figures/1259197786/spi62368_p0108.png" class="inlineimage"> # <img id="id_1259197786_001_000268c" src="figures/spi62368_p0108.png" class="inlineimage"> # <img src="http://textflow.mheducation.com/books/1259197786/images/chapter_head1.jpg" # <img src="images/chapter_head1.jpg" for img in soup.select('img'): src = img['src'] dest = src.rsplit('/', 1)[1] if 'images' in src: dest = path.join('images',dest) elif 'figures' in src: dest = path.join('figures', dest) else: print('WARNING: Unidentified src ' + src) dlrs(src, dest, session) img['src'] = dest del img['onclick'] return str(soup) class McGrawHillBook(BookBase): def needlogin(self): if 
self.httpstate is not None: return True try: with open(cookiefile, 'rb') as f: self.httpstate = pickle.load(f) except IOError as e: return True r = requests.get('http://textflow.mheducation.com/parser.php?secload=TOC&xml', cookies = self.httpstate) return r.text.startswith('No Access') def login(self): print('start login()') session = requests.Session() payload = {'userName': self.conf['username'], 'password': self.conf['password']} session.post('http://connect.mheducation.com/connect/login/validate.htm', data=payload) session.get('http://connect.mheducation.com/connect/hmStudentCourseList.do?showSniffer=false&fromPage=login') session.get('http://connect.mheducation.com/connect/hmStudentSectionHomePortal.do?sectionId={}'.format(self.conf['sectionid'])) response = session.get('http://connect.mheducation.com/connect/hmEBook.do?setTab=sectionTabs') soup = BeautifulSoup(response.text, 'html.parser') inputs = soup.body.find('form', attrs={'id' : 'textflow_form'}).find_all('input') payload = {} for input in inputs: payload[input['name']] = input['value'] session.post('http://textflow.mheducation.com/wireinner.php', data=payload) # self.httpstate = list(filter(lambda x: x.name in ('PHPSESSID', 'ERIGHTS'), session.cookies)) self.httpstate = (session.cookies) with open(cookiefile, 'wb') as f: pickle.dump(self.httpstate, f) session.close() print('end login()') def gettoc(self, session): ''' retrieves the table of contents. 
:param session: the requests session that contains cookie info :return: a dict, with the key being the secion's id, and the value being the section's title ''' tocfile = path.join(cachefolder, 'toc.dat') toc = None try: with open(tocfile, 'rb') as f: toc = pickle.load(f) except IOError as e: pass if toc is not None: return toc toc = {} response = session.get('http://textflow.mheducation.com/parser.php?secload=TOC&xml') # print(response.text) soup = BeautifulSoup(response.text, 'xml') for lv1sec in soup.treedata.children: secid1 = lv1sec['secname'] sectitle1 = lv1sec['disp'] if secid1 not in toc: toc[secid1] = sectitle1 for lv2sec in lv1sec.find_all('item', type='page', recursive=False): secid2 = lv2sec['secname'] sectitle2 = BeautifulSoup(lv2sec['disp'], 'lxml').get_text() if secid2 not in toc: toc[secid2] = sectitle2 with open(tocfile, 'wb') as f: pickle.dump(toc, f) return toc def crawl(self): print('start crawl()') session = requests.Session() session.cookies = (self.httpstate) toc = self.gettoc(session) sortedsecs = sorted(toc.keys(), key=pad_secid) idmap = {} # Doanload all sections first, put them in the contentcache. 
And also build the idmap dict for secid in sortedsecs: scan(get_sec(secid, session), secid, idmap) for index, secid in enumerate(sortedsecs): prevsec = sortedsecs[index - 1] if (index > 0) else None nextsec = sortedsecs[index + 1] if (index < len(sortedsecs)-1) else None prevhtml = '<a href="{}.htm"> &lt;&lt;&lt; {} </a>'.format(prevsec, toc[prevsec]) if prevsec is not None else '' nexthtml = '<a href="{}.htm"> {} &gt;&gt;&gt; </a>'.format(nextsec, toc[nextsec]) if nextsec is not None else '' navbar = '<div style="position:relative; margin:20px; "><div>{}</div><div style="position:absolute;right:0;top:0;">{}</div></div>'.format(prevhtml, nexthtml) header = '<html><head><meta charset="UTF-8"><title>{} - {}</title><link rel="stylesheet" type="text/css" href="book.css"></head><body>'.format(secid, toc[secid]) + navbar footer = navbar + '<script src="book.js"></script></body></html>' content = transform(get_sec(secid, session), secid, idmap, session) html = header + content + footer secfile = path.join(outfolder, secid + '.htm') with open(secfile, 'w', encoding='utf8') as f: f.write(html) def init(self, clean=False): if clean: rmtree(cachefolder, ignore_errors=True) rmtree(outfolder, ignore_errors=True) ensure_dir(cachefolder) ensure_dir(outfolder) zipfile = path.join(path.dirname(__file__), 'mhbook.zip') with ZipFile(zipfile, 'r') as z: z.extractall(outfolder) def mainflow(self): super().mainflow()
__author__ = 'Mohammad' import os import numpy as np import tensorflow as tf from tensorflow.contrib import learn, rnn from data_loader import get_related_answers, get_vqa_data, load_image # Parameters embedding_dim = 300 word2vec_file = 'data/GoogleNews-vectors-negative300.bin' learning_rate = 0.001 batch_size = 8 display_step = 10 save_step = 200 n_hidden = 256 pre_output_len = 256 img_features_len = 512 def load_related_train_data(): related_answers = get_related_answers(True) question_texts = related_answers.keys() answers_vocab = list() ans_question_num = list() counter = 0 for q in question_texts: for ans in related_answers[q]: answers_vocab.append(ans) ans_question_num.append(counter) counter += 1 max_question_length = max([len(question.split(" ")) for question in question_texts]) questions_vocab_processor = learn.preprocessing.VocabularyProcessor(max_question_length) questions_vocab_processor.fit(question_texts) # questions = np.array(list(questions_vocab_processor.fit_transform(question_texts))) answers_vocab_processor = learn.preprocessing.VocabularyProcessor(1, min_frequency=20) answers_vocab_processor.fit(answers_vocab) print "answers size={}".format(len(answers_vocab_processor.vocabulary_) - 1) return questions_vocab_processor, answers_vocab_processor, max_question_length def load_data(questions_vocab_processor, answers_vocab_processor, is_train): vqa_triplets = get_vqa_data(is_train) question_texts = list() answers_vocab = list() images = list() for (q, a, v) in vqa_triplets: if a in answers_vocab_processor.vocabulary_._mapping: question_texts.append(q) answers_vocab.append(a) images.append(v) questions = np.array(list(questions_vocab_processor.transform(question_texts))) answers = np.array(list(answers_vocab_processor.transform(answers_vocab))) return questions, answers, images def load_word2vec(questions_vocab_processor): init_embedding_w = np.random.uniform(-0.25, 0.25, (len(questions_vocab_processor.vocabulary_), embedding_dim)) with 
open(word2vec_file, "rb") as f: header = f.readline() vocab_size, layer1_size = map(int, header.split()) binary_len = np.dtype('float32').itemsize * layer1_size counter = 0 for line in xrange(vocab_size): word = [] while True: ch = f.read(1) if ch == ' ': word = ''.join(word) break if ch != '\n': word.append(ch) idx = questions_vocab_processor.vocabulary_.get(word) if idx != 0: init_embedding_w[idx] = np.fromstring(f.read(binary_len), dtype='float32') else: f.read(binary_len) counter += 1 if counter % 100000 == 0: print counter print 'loading word2vec file is complete' return init_embedding_w def get_batch(step, questions, answers, images_paths, answers_vocab_len): batch_start = (step * batch_size) % len(questions) batch_in_questions = questions[batch_start:batch_start + batch_size] batch_in_images = list() batch_out = np.zeros((batch_size, answers_vocab_len)) for i in range(batch_start, batch_start + len(batch_in_questions)): batch_in_images.append(load_image(images_paths[i])) batch_out[i - batch_start, answers[i] - 1] = 1 tmp = batch_size - len(batch_in_questions) if tmp > 0: for i in range(0, tmp): batch_out[i + len(batch_in_questions), answers[i] - 1] = 1 batch_in_images.append(load_image(images_paths[i])) batch_in_questions = np.concatenate((batch_in_questions, questions[0:tmp]), axis=0) return batch_in_questions, np.asarray(batch_in_images), batch_out def get_batch_for_test(step, questions, answers, images_paths, answers_vocab_len): batch_start = (step * batch_size) % len(questions) batch_in_questions = questions[batch_start:batch_start + batch_size] batch_in_images = list() batch_out = np.zeros((len(batch_in_questions), answers_vocab_len)) for i in range(batch_start, batch_start + len(batch_in_questions)): batch_in_images.append(load_image(images_paths[i])) batch_out[i - batch_start, answers[i] - 1] = 1 return batch_in_questions, np.asarray(batch_in_images), batch_out, len(batch_in_questions) def run(): questions_vocab_processor, answers_vocab_processor, 
max_question_length = load_related_train_data() questions, answers, images_paths = load_data(questions_vocab_processor, answers_vocab_processor, True) sess = tf.Session() res_net_loader = tf.train.import_meta_graph('data/tensorflow-resnet-pretrained-20160509/ResNet-L152.meta') res_net_loader.restore(sess, 'data/tensorflow-resnet-pretrained-20160509/ResNet-L152.ckpt') graph = tf.get_default_graph() images = graph.get_tensor_by_name("images:0") raw_img_features = graph.get_tensor_by_name("avg_pool:0") raw_to_img_features_w = tf.Variable(tf.random_normal([raw_img_features.shape.as_list()[1], img_features_len]), name="raw_to_img_w") raw_to_img_features_bias = tf.Variable(tf.random_normal([img_features_len]), name="raw_to_img_bias") img_features = tf.nn.relu(tf.matmul(raw_img_features, raw_to_img_features_w) + raw_to_img_features_bias) embedding_w = tf.Variable(tf.random_uniform([len(questions_vocab_processor.vocabulary_), embedding_dim], -1.0, 1.0), name="embedding_w") input_questions = tf.placeholder(tf.int32, [None, questions.shape[1]], name="input_questions") embedded_chars = tf.nn.embedding_lookup(embedding_w, input_questions) unstacked_embedded_chars = tf.unstack(embedded_chars, max_question_length, 1) lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0) encoded_questions, _ = rnn.static_rnn(lstm_cell, unstacked_embedded_chars, dtype=tf.float32) q_w = tf.Variable(tf.random_normal([n_hidden, n_hidden]), name="q_w") q_bias = tf.Variable(tf.random_normal([n_hidden]), name="q_bias") questions_features = tf.nn.relu(tf.matmul(encoded_questions[-1], q_w) + q_bias) output_len = len(answers_vocab_processor.vocabulary_) - 1 output_answers = tf.placeholder(tf.float32, [None, output_len], name="output_answers") # tmp_len = img_features_len * pre_output_len # q_to_img_w = tf.Variable(tf.random_normal([n_hidden, tmp_len]), name="q_to_img_w") # q_to_img_bias = tf.Variable(tf.random_normal([tmp_len]), name="q_to_img_bias") # img_out_w = tf.matmul(questions_features, 
q_to_img_w) + q_to_img_bias # img_out_w = tf.reshape(img_out_w, [-1, img_features_len, pre_output_len]) img_out_w = tf.Variable(tf.random_normal([img_features_len, pre_output_len]), name="img_w") q_out_w = tf.Variable(tf.random_normal([n_hidden, pre_output_len]), name="q_out_w") out_bias = tf.Variable(tf.random_normal([pre_output_len]), name="out_bias") pre_output = tf.nn.relu(tf.matmul(img_features, img_out_w) + tf.matmul(questions_features, q_out_w) + out_bias) pre_output_w = tf.Variable(tf.random_normal([pre_output_len, output_len]), name="pre_out_w") pre_output_bias = tf.Variable(tf.random_normal([output_len]), name="pre_out_bias") prediction = tf.matmul(pre_output, pre_output_w) + pre_output_bias prediction = tf.identity(prediction, name="prediction") cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction, labels=output_answers), name='cost') optimizing_list = [raw_to_img_features_w, raw_to_img_features_bias, embedding_w, lstm_cell.trainable_variables, lstm_cell.trainable_weights, q_w, q_bias, img_out_w, q_out_w, out_bias, pre_output_w, pre_output_bias] optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, var_list=optimizing_list) step = tf.Variable(0, name="step") with sess.as_default(): sess.run(tf.global_variables_initializer()) init_embedding_w = load_word2vec(questions_vocab_processor) sess.run(embedding_w.assign(init_embedding_w)) saver = tf.train.Saver() if os.path.isfile('data/not_fined_tuned_trained_models/vqa_model.meta'): saver = tf.train.import_meta_graph('data/not_fined_tuned_trained_models/vqa_model.meta') saver.restore(sess, tf.train.latest_checkpoint('data/not_fined_tuned_trained_models/')) print "Restored step={}".format(sess.run(step)) while sess.run(step) * batch_size < len(questions): pythonic_step = sess.run(step) batch_in_questions, batch_in_images, batch_out, _ = get_batch_for_test(pythonic_step, questions, answers, images_paths, output_len) sess.run(optimizer, 
feed_dict={input_questions: batch_in_questions, images: batch_in_images, output_answers: batch_out}) sess.run(tf.assign_add(step, 1)) if pythonic_step % display_step == 0: loss = sess.run(cost, feed_dict={input_questions: batch_in_questions, images: batch_in_images, output_answers: batch_out}) print("Iter " + str(pythonic_step) + ", Minibatch Loss= " + "{:.6f}".format(loss)) if pythonic_step % save_step == 0: saver.save(sess, 'data/not_fined_tuned_trained_models/vqa_model_not_fin') print("Saving...") print("Optimization Finished!") saver.save(sess, 'data/not_fined_tuned_trained_models/vqa_model') sess.run(tf.assign(step, 0)) total_size = 0 losses = [] while sess.run(step) * batch_size < len(questions): pythonic_step = sess.run(step) batch_in_questions, batch_in_images, batch_out, size = get_batch_for_test(pythonic_step, questions, answers, images_paths, output_len) loss = sess.run(cost, feed_dict={input_questions: batch_in_questions, images: batch_in_images, output_answers: batch_out}) losses.append(loss * size) total_size += size if pythonic_step % display_step == 0: print("Training samples {} out of {}".format(pythonic_step * batch_size, len(questions))) print("Till now training loss= " + "{:.6f}".format(sum(losses) / total_size)) sess.run(tf.assign_add(step, 1)) total_train_loss = sum(losses) / total_size print("Total Training Loss= " + "{:.6f}".format(total_train_loss)) if total_size != len(questions): print("BUG!!!!") print(total_size) print(len(questions)) return questions, answers = load_data(questions_vocab_processor, answers_vocab_processor, False) sess.run(tf.assign(step, 0)) total_size = 0 losses = [] while sess.run(step) * batch_size < len(questions): pythonic_step = sess.run(step) batch_in_questions, batch_in_images, batch_out, size = get_batch_for_test(pythonic_step, questions, answers, images_paths, output_len) loss = sess.run(cost, feed_dict={input_questions: batch_in_questions, images: batch_in_images, output_answers: batch_out}) losses.append(loss 
* size) total_size += size if pythonic_step % display_step == 0: print("Validation samples {} out of {}".format(pythonic_step * batch_size, len(questions))) print("Till now validation loss= " + "{:.6f}".format(sum(losses) / total_size)) print("Total Training Loss= " + "{:.6f}".format(total_train_loss)) sess.run(tf.assign_add(step, 1)) total_validation_loss = sum(losses) / len(questions) print("Total Validation Loss= " + "{:.6f}".format(total_validation_loss)) if total_size != len(questions): print("BUG!!!!") print(total_size) print(len(questions)) return if __name__ == "__main__": run()
# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock import netaddr from webob import exc from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2 from nova.api.openstack.compute.plugins.v3 import hypervisors \ as hypervisors_v21 from nova.api.openstack import extensions from nova.cells import utils as cells_utils from nova import context from nova import db from nova import exception from nova import objects from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_instance TEST_HYPERS = [ dict(id=1, service_id=1, host="compute1", vcpus=4, memory_mb=10 * 1024, local_gb=250, vcpus_used=2, memory_mb_used=5 * 1024, local_gb_used=125, hypervisor_type="xen", hypervisor_version=3, hypervisor_hostname="hyper1", free_ram_mb=5 * 1024, free_disk_gb=125, current_workload=2, running_vms=2, cpu_info='cpu_info', disk_available_least=100, host_ip=netaddr.IPAddress('1.1.1.1')), dict(id=2, service_id=2, host="compute2", vcpus=4, memory_mb=10 * 1024, local_gb=250, vcpus_used=2, memory_mb_used=5 * 1024, local_gb_used=125, hypervisor_type="xen", hypervisor_version=3, hypervisor_hostname="hyper2", free_ram_mb=5 * 1024, free_disk_gb=125, current_workload=2, running_vms=2, cpu_info='cpu_info', disk_available_least=100, host_ip=netaddr.IPAddress('2.2.2.2'))] TEST_SERVICES = [ objects.Service(id=1, host="compute1", binary="nova-compute", 
topic="compute_topic", report_count=5, disabled=False, disabled_reason=None, availability_zone="nova"), objects.Service(id=2, host="compute2", binary="nova-compute", topic="compute_topic", report_count=5, disabled=False, disabled_reason=None, availability_zone="nova"), ] TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct) for hyper_dct in TEST_HYPERS] TEST_HYPERS[0].update({'service': TEST_SERVICES[0]}) TEST_HYPERS[1].update({'service': TEST_SERVICES[1]}) TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"), dict(name="inst2", uuid="uuid2", host="compute2"), dict(name="inst3", uuid="uuid3", host="compute1"), dict(name="inst4", uuid="uuid4", host="compute2")] def fake_compute_node_get_all(context): return TEST_HYPERS_OBJ def fake_compute_node_search_by_hypervisor(context, hypervisor_re): return TEST_HYPERS_OBJ def fake_compute_node_get(context, compute_id): for hyper in TEST_HYPERS_OBJ: if hyper.id == int(compute_id): return hyper raise exception.ComputeHostNotFound(host=compute_id) def fake_service_get_by_compute_host(context, host): for service in TEST_SERVICES: if service.host == host: return service def fake_compute_node_statistics(context): result = dict( count=0, vcpus=0, memory_mb=0, local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0, free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0, disk_available_least=0, ) for hyper in TEST_HYPERS_OBJ: for key in result: if key == 'count': result[key] += 1 else: result[key] += hyper[key] return result def fake_instance_get_all_by_host(context, host): results = [] for inst in TEST_SERVERS: if inst['host'] == host: inst_obj = fake_instance.fake_instance_obj(context, **inst) results.append(inst_obj) return results class HypervisorsTestV21(test.NoDBTestCase): # copying the objects locally so the cells testcases can provide their own TEST_HYPERS_OBJ = copy.deepcopy(TEST_HYPERS_OBJ) TEST_SERVICES = copy.deepcopy(TEST_SERVICES) TEST_SERVERS = copy.deepcopy(TEST_SERVERS) DETAIL_HYPERS_DICTS = 
copy.deepcopy(TEST_HYPERS)
    # Expected "detail" views: the raw service_id/host DB fields are replaced
    # by a nested service dict plus state/status derived from the servicegroup
    # API (stubbed to "up" in _set_up_controller).
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    del DETAIL_HYPERS_DICTS[0]['host']
    del DETAIL_HYPERS_DICTS[1]['host']
    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=1, host='compute1',
                                                   disabled_reason=None)})
    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=2, host='compute2',
                                                   disabled_reason=None)})

    # Expected "index" (summary) views for the same two hypervisors.
    INDEX_HYPER_DICTS = [
        dict(id=1, hypervisor_hostname="hyper1", state='up',
             status='enabled'),
        dict(id=2, hypervisor_hostname="hyper2", state='up',
             status='enabled')]

    def _get_request(self, use_admin_context):
        # Build a blank fake request, optionally carrying an admin context.
        return fakes.HTTPRequest.blank('',
                                       use_admin_context=use_admin_context)

    def _set_up_controller(self):
        # v2.1 controller; servicegroup is stubbed so every host reports "up".
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)

    def setUp(self):
        super(HypervisorsTestV21, self).setUp()
        self._set_up_controller()
        self.rule_hyp_show = "os_compute_api:os-hypervisors"
        # Stub out every host_api/db call the controller makes with the
        # module-level fakes so no database is needed.
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       fake_compute_node_get_all)
        self.stubs.Set(self.controller.host_api,
                       'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor)
        self.stubs.Set(self.controller.host_api, 'compute_node_get',
                       fake_compute_node_get)
        self.stubs.Set(db, 'compute_node_statistics',
                       fake_compute_node_statistics)

    def test_view_hypervisor_nodetail_noservers(self):
        # detail=False, no servers -> summary view.
        result = self.controller._view_hypervisor(
            self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], False)
        self.assertEqual(result, self.INDEX_HYPER_DICTS[0])

    def test_view_hypervisor_detail_noservers(self):
        # detail=True -> full view including nested service dict.
        result = self.controller._view_hypervisor(
            self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], True)
        self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])

    def test_view_hypervisor_servers(self):
        # Passing servers appends a 'servers' list to the summary view.
        result = self.controller._view_hypervisor(self.TEST_HYPERS_OBJ[0],
                                                  self.TEST_SERVICES[0],
                                                  False, self.TEST_SERVERS)
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'servers': [
                                  dict(name="inst1", uuid="uuid1"),
                                  dict(name="inst2", uuid="uuid2"),
                                  dict(name="inst3", uuid="uuid3"),
                                  dict(name="inst4", uuid="uuid4")]})
        self.assertEqual(result, expected_dict)

    def test_index(self):
        req = self._get_request(True)
        result = self.controller.index(req)
        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))

    def test_index_non_admin(self):
        # Non-admin callers are rejected by policy.
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.index, req)

    def test_detail(self):
        req = self._get_request(True)
        result = self.controller.detail(req)
        self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))

    def test_detail_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.detail, req)

    def test_show_noid(self):
        # Unknown id -> 404.
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')

    def test_show_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')

    def test_show_withid(self):
        req = self._get_request(True)
        result = self.controller.show(req, self.TEST_HYPERS_OBJ[0].id)
        self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))

    def test_show_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req,
                          self.TEST_HYPERS_OBJ[0].id)

    def test_uptime_noid(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')

    def test_uptime_notimplemented(self):
        # If the driver can't report uptime the 501 propagates to the caller.
        def fake_get_host_uptime(context, hyp):
            raise exc.HTTPNotImplemented()

        self.stubs.Set(self.controller.host_api, 'get_host_uptime',
                       fake_get_host_uptime)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotImplemented,
                          self.controller.uptime, req,
                          self.TEST_HYPERS_OBJ[0].id)

    def test_uptime_implemented(self):
        # Uptime string from the host API is merged into the summary view.
        def fake_get_host_uptime(context, hyp):
            return "fake uptime"

        self.stubs.Set(self.controller.host_api, 'get_host_uptime',
                       fake_get_host_uptime)
        req = self._get_request(True)
        result = self.controller.uptime(req, self.TEST_HYPERS_OBJ[0].id)
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'uptime': "fake uptime"})
        self.assertEqual(result, dict(hypervisor=expected_dict))

    def test_uptime_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req,
                          'abc')

    def test_uptime_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.uptime, req,
                          self.TEST_HYPERS_OBJ[0].id)

    def test_search(self):
        req = self._get_request(True)
        result = self.controller.search(req, 'hyper')
        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))

    def test_search_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.search, req,
                          self.TEST_HYPERS_OBJ[0].id)

    def test_search_non_exist(self):
        # No hypervisor matches the pattern -> 404.
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                                                                hypervisor_re):
            return []

        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')

    @mock.patch.object(objects.InstanceList, 'get_by_host',
                       side_effect=fake_instance_get_all_by_host)
    def test_servers(self, mock_get):
        req = self._get_request(True)
        result = self.controller.servers(req, 'hyper')

        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
        expected_dict[0].update({'servers': [
                                     dict(uuid="uuid1"),
                                     dict(uuid="uuid3")]})
        expected_dict[1].update({'servers': [
                                     dict(uuid="uuid2"),
                                     dict(uuid="uuid4")]})

        # Only compare uuids; drop the server names from the actual result.
        for output in result['hypervisors']:
            servers = output['servers']
            for server in servers:
                del server['name']

        self.assertEqual(result, dict(hypervisors=expected_dict))

    def test_servers_non_id(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                                                                hypervisor_re):
            return []

        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.servers,
                          req, '115')

    def test_servers_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.servers, req,
                          self.TEST_HYPERS_OBJ[0].id)

    def test_servers_with_non_integer_hypervisor_id(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                                                                hypervisor_re):
            return []

        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.servers, req, 'abc')

    def test_servers_with_no_server(self):
        # A hypervisor with no instances still appears, just without servers.
        def fake_instance_get_all_by_host_return_empty(context,
                                                       hypervisor_re):
            return []

        self.stubs.Set(self.controller.host_api, 'instance_get_all_by_host',
                       fake_instance_get_all_by_host_return_empty)
        req = self._get_request(True)
        result = self.controller.servers(req, self.TEST_HYPERS_OBJ[0].id)
        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))

    def test_statistics(self):
        # Aggregated statistics come straight from fake_compute_node_statistics.
        req = self._get_request(True)
        result = self.controller.statistics(req)
        self.assertEqual(result, dict(hypervisor_statistics=dict(
            count=2,
            vcpus=8,
            memory_mb=20 * 1024,
            local_gb=500,
            vcpus_used=4,
            memory_mb_used=10 * 1024,
            local_gb_used=250,
            free_ram_mb=10 * 1024,
            free_disk_gb=250,
            current_workload=4,
            running_vms=4,
            disk_available_least=200)))

    def test_statistics_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.statistics, req)


class HypervisorsTestV2(HypervisorsTestV21):
    # v2 expected views are derived from the v2.1 ones below by deleting the
    # fields the legacy API does not expose.
    DETAIL_HYPERS_DICTS = copy.deepcopy(
        HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    # The v2 API does not expose state/status, the service disabled_reason,
    # or host_ip, so strip them from the inherited expected views.
    del DETAIL_HYPERS_DICTS[0]['state']
    del DETAIL_HYPERS_DICTS[1]['state']
    del DETAIL_HYPERS_DICTS[0]['status']
    del DETAIL_HYPERS_DICTS[1]['status']
    del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
    del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
    del DETAIL_HYPERS_DICTS[0]['host_ip']
    del DETAIL_HYPERS_DICTS[1]['host_ip']

    INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
    del INDEX_HYPER_DICTS[0]['state']
    del INDEX_HYPER_DICTS[1]['state']
    del INDEX_HYPER_DICTS[0]['status']
    del INDEX_HYPER_DICTS[1]['status']

    def setUp(self):
        super(HypervisorsTestV2, self).setUp()
        # v2 uses the legacy extension-based policy rule.
        self.rule_hyp_show = "compute_extension:hypervisors"
        self.rule = {self.rule_hyp_show: ""}

    def _set_up_controller(self):
        # Legacy v2 controller built through the extension manager.
        self.context = context.get_admin_context()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)

    def test_index_non_admin_back_compatible_db(self):
        # With an empty policy rule the DB layer itself enforces admin-ness.
        self.policy.set_rules(self.rule)
        req = self._get_request(False)
        self.assertRaises(exception.AdminRequired, self.controller.index,
                          req)

    def test_detail_non_admin_back_compatible_db(self):
        self.policy.set_rules(self.rule)
        req = self._get_request(False)
        self.assertRaises(exception.AdminRequired, self.controller.detail,
                          req)

    def test_search_non_admin_back_compatible_db(self):
        self.policy.set_rules(self.rule)
        req = self._get_request(False)
        self.assertRaises(exception.AdminRequired,
                          self.controller.search, req,
                          self.TEST_HYPERS_OBJ[0].id)

    def test_servers_non_admin_back_compatible_db(self):
        self.policy.set_rules(self.rule)
        req = self._get_request(False)
        self.assertRaises(exception.AdminRequired,
                          self.controller.servers, req,
                          self.TEST_HYPERS_OBJ[0].id)


class CellHypervisorsTestV21(HypervisorsTestV21):
    # Re-run the v2.1 tests with cells enabled: all ids and hosts in the
    # fixtures and expected views get the cell path prepended.
    cell_path = 'cell1'
    TEST_HYPERS_OBJ = [cells_utils.ComputeNodeProxy(obj, cell_path)
                       for obj in TEST_HYPERS_OBJ]
    TEST_SERVICES = [cells_utils.ServiceProxy(obj, cell_path)
                     for obj in TEST_SERVICES]

    TEST_SERVERS = [dict(server,
                         host=cells_utils.cell_with_item(cell_path,
                                                         server['host']))
                    for server in TEST_SERVERS]

    DETAIL_HYPERS_DICTS = copy.deepcopy(
        HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    DETAIL_HYPERS_DICTS = [dict(hyp,
                                id=cells_utils.cell_with_item(cell_path,
                                                              hyp['id']),
                                service=dict(hyp['service'],
                                             id=cells_utils.cell_with_item(
                                                 cell_path,
                                                 hyp['service']['id']),
                                             host=cells_utils.cell_with_item(
                                                 cell_path,
                                                 hyp['service']['host'])))
                           for hyp in DETAIL_HYPERS_DICTS]

    INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
    INDEX_HYPER_DICTS = [dict(hyp,
                              id=cells_utils.cell_with_item(cell_path,
                                                            hyp['id']))
                         for hyp in INDEX_HYPER_DICTS]

    # classmethod fakes: they must read the cell-proxied fixtures on the
    # class, unlike the module-level fakes used by the parent.
    @classmethod
    def fake_compute_node_get_all(cls, context):
        return cls.TEST_HYPERS_OBJ

    @classmethod
    def fake_compute_node_search_by_hypervisor(cls, context, hypervisor_re):
        return cls.TEST_HYPERS_OBJ

    @classmethod
    def fake_compute_node_get(cls, context, compute_id):
        for hyper in cls.TEST_HYPERS_OBJ:
            if hyper.id == compute_id:
                return hyper
        raise exception.ComputeHostNotFound(host=compute_id)

    @classmethod
    def fake_service_get_by_compute_host(cls, context, host):
        for service in cls.TEST_SERVICES:
            if service.host == host:
                return service

    @classmethod
    def fake_instance_get_all_by_host(cls, context, host):
        results = []
        for inst in cls.TEST_SERVERS:
            if inst['host'] == host:
                results.append(inst)
        return results

    def setUp(self):
        # Enable the cells API before the parent stubs the controller.
        self.flags(enable=True, cell_type='api', group='cells')

        super(CellHypervisorsTestV21, self).setUp()

        # Re-stub with the cell-aware classmethod fakes.
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       self.fake_compute_node_get_all)
        self.stubs.Set(self.controller.host_api,
                       'service_get_by_compute_host',
                       self.fake_service_get_by_compute_host)
        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       self.fake_compute_node_search_by_hypervisor)
        self.stubs.Set(self.controller.host_api, 'compute_node_get',
                       self.fake_compute_node_get)
        self.stubs.Set(self.controller.host_api, 'compute_node_statistics',
                       fake_compute_node_statistics)
        self.stubs.Set(self.controller.host_api, 'instance_get_all_by_host',
                       self.fake_instance_get_all_by_host)


class CellHypervisorsTestV2(HypervisorsTestV2, CellHypervisorsTestV21):
    # v2 variant of the cells tests: same cell-path rewriting, applied to the
    # reduced v2 expected views.
    cell_path = 'cell1'
    DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV2.DETAIL_HYPERS_DICTS)
    DETAIL_HYPERS_DICTS = [dict(hyp,
                                id=cells_utils.cell_with_item(cell_path,
                                                              hyp['id']),
                                service=dict(hyp['service'],
                                             id=cells_utils.cell_with_item(
                                                 cell_path,
                                                 hyp['service']['id']),
                                             host=cells_utils.cell_with_item(
                                                 cell_path,
                                                 hyp['service']['host'])))
                           for hyp in DETAIL_HYPERS_DICTS]

    INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV2.INDEX_HYPER_DICTS)
    INDEX_HYPER_DICTS = [dict(hyp,
                              id=cells_utils.cell_with_item(cell_path,
                                                            hyp['id']))
                         for hyp in INDEX_HYPER_DICTS]

    def setUp(self):
        super(CellHypervisorsTestV2, self).setUp()
#!/usr/bin/env python # # Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from collections import OrderedDict
import itertools
import json
import multiprocessing
import optparse
import os
from os.path import getmtime, isdir, join
import platform
import random
import shlex
import subprocess
import sys
import time

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local.variants import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


# Base dir of the v8 checkout to be used as cwd.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

DEFAULT_OUT_GN = "out.gn"

ARCH_GUESS = utils.DefaultArch()

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  # This needs to stay in sync with test/bot_default.isolate.
  "bot_default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "webkit",
    "mkgrokdump",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
  ],
  # This needs to stay in sync with test/default.isolate.
  "default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "mkgrokdump",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
  ],
  # This needs to stay in sync with test/optimize_for_size.isolate.
  "optimize_for_size": [
    "debugger",
    "mjsunit",
    "cctest",
    "inspector",
    "webkit",
    "intl",
  ],
  "unittests": [
    "unittests",
  ],
}

# Default per-test timeout in seconds (scaled per mode/arch in Execute).
TIMEOUT_DEFAULT = 60

# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]

MORE_VARIANTS = [
  "stress",
  "nooptimization",
  "stress_asm_wasm",
  "wasm_traps",
]

EXHAUSTIVE_VARIANTS = MORE_VARIANTS + VARIANTS

VARIANT_ALIASES = {
  # The default for developer workstations.
  "dev": VARIANTS,
  # Additional variants, run on all bots.
  "more": MORE_VARIANTS,
  # TODO(machenbach): Deprecate this after the step is removed on infra side.
  # Additional variants, run on a subset of bots.
  "extra": [],
}

DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort"]

# Per-mode configuration: d8 flags, timeout scaling, which status-file mode
# applies, the execution mode, and the build output folder.
MODES = {
  "debug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "debug",
  },
  "optdebug": {
    "flags": DEBUG_FLAGS,
    "timeout_scalefactor": 4,
    "status_mode": "debug",
    "execution_mode": "debug",
    "output_folder": "optdebug",
  },
  "release": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "release",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": {
    "flags": RELEASE_FLAGS,
    "timeout_scalefactor": 1,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": {
    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
    "timeout_scalefactor": 2,
    "status_mode": "debug",
    "execution_mode": "release",
    "output_folder": "release",
  },
}

GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "android_x64",
                   "arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "mips64",
                   "mips64el",
                   "s390",
                   "s390x",
                   "ppc",
                   "ppc64",
                   "x64",
                   "x32",
                   "arm64"]

# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "android_x64",
              "arm",
              "mips",
              "mipsel",
              "mips64",
              "mips64el",
              "s390",
              "s390x",
              "arm64"]


def BuildOptions():
  """Build and return the optparse parser for all command-line options."""
  result = optparse.OptionParser()
  result.usage = '%prog [options] [tests]'
  result.description = """TESTS: %s""" % (TEST_MAP["default"])
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect: %s"
                          % SUPPORTED_ARCHS))
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'")
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--sancov-dir",
                    help="Directory where to collect coverage data")
  result.add_option("--cfi-vptr",
                    help="Run tests with UBSAN cfi_vptr option.",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--dcheck-always-on",
                    help="Indicates that V8 was compiled with DCHECKs"
                    " enabled",
                    default=False, action="store_true")
  result.add_option("--novfp3",
                    help="Indicates that V8 was compiled without VFP3"
                    " support",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--gcov-coverage",
                    help="Uses executables instrumented for gcov coverage",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data",
                    help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--download-data-only",
                    help="Deprecated",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    action="append", default=[])
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated,"
                    " uppercase for ninja and buildbot builds): %s"
                    % MODES.keys())
  result.add_option("--no-harness", "--noharness",
                    help="Run without test harness of a given suite",
                    default=False, action="store_true")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks (deprecated)',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last"
                    " run.",
                    default=False, dest="no_sorting",
                    action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants;"
                    " default: \"%s\"" % ",".join(VARIANTS))
  result.add_option("--exhaustive-variants",
                    default=False, action="store_true",
                    help="Use exhaustive set of default variants:"
                    " \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--gn",
                    help="Scan out.gn for the last built configuration",
                    default=False, action="store_true")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(),
                    default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow tests)"))
  result.add_option("--report",
                    help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--flakiness-results",
                    help="Path to a file for storing flakiness json.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir",
                    default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a"
                    " simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--swarming",
                    help="Indicates running test driver on swarming.",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=TIMEOUT_DEFAULT, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed",
                    type="int",
                    help="Default seed for initializing random generator")
  result.add_option("--random-seed-stress-count", default=1, type="int",
                    dest="random_seed_stress_count",
                    help="Number of runs with different random seeds")
  result.add_option("--ubsan-vptr",
                    help="Regard test expectations for UBSanVptr",
                    default=False, action="store_true")
  # NOTE(review): this help string looks copy-pasted from --ubsan-vptr;
  # it presumably should mention MSAN — confirm before changing.
  result.add_option("--msan",
                    help="Regard test expectations for UBSanVptr",
                    default=False, action="store_true")
  return result


def RandomSeed():
  """Return a nonzero random 32-bit signed seed from the system RNG."""
  seed = 0
  while not seed:
    seed = random.SystemRandom().randint(-2147483648, 2147483647)
  return seed


def BuildbotToV8Mode(config):
  """Convert buildbot build configs to configs understood by the v8 runner.
  V8 configs are always lower case and without the additional _x64 suffix for
  64 bit builds on windows with ninja.
  """
  # Strip a trailing "_x64" (windows/ninja naming), then lower-case.
  mode = config[:-4] if config.endswith('_x64') else config
  return mode.lower()


def SetupEnvironment(options):
  """Setup additional environment variables."""

  # Many tests assume an English interface.
  os.environ['LANG'] = 'en_US.UTF-8'

  # Shared sanitizer option pointing at the bundled llvm-symbolizer.
  symbolizer = 'external_symbolizer_path=%s' % (
      os.path.join(
          BASE_DIR,
          'third_party',
          'llvm-build',
          'Release+Asserts',
          'bin',
          'llvm-symbolizer',
      )
  )

  if options.asan:
    asan_options = [symbolizer, "allow_user_segv_handler=1"]
    if not utils.GuessOS() == 'macos':
      # LSAN is not available on mac.
      asan_options.append('detect_leaks=1')
    os.environ['ASAN_OPTIONS'] = ":".join(asan_options)

  if options.sancov_dir:
    assert os.path.exists(options.sancov_dir)
    # NOTE: this overwrites any ASAN_OPTIONS set by the --asan branch above.
    os.environ['ASAN_OPTIONS'] = ":".join([
      'coverage=1',
      'coverage_dir=%s' % options.sancov_dir,
      symbolizer,
      "allow_user_segv_handler=1",
    ])

  if options.cfi_vptr:
    os.environ['UBSAN_OPTIONS'] = ":".join([
      'print_stacktrace=1',
      'print_summary=1',
      'symbolize=1',
      symbolizer,
    ])

  if options.ubsan_vptr:
    os.environ['UBSAN_OPTIONS'] = ":".join([
      'print_stacktrace=1',
      symbolizer,
    ])

  if options.msan:
    os.environ['MSAN_OPTIONS'] = symbolizer

  if options.tsan:
    suppressions_file = os.path.join(
        BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
    os.environ['TSAN_OPTIONS'] = " ".join([
      symbolizer,
      'suppressions=%s' % suppressions_file,
      'exit_code=0',
      'report_thread_leaks=0',
      'history_size=7',
      'report_destroy_locked=0',
    ])


def ProcessOptions(options):
  """Validate and normalize parsed options; return False on bad input."""
  global VARIANTS

  # First try to auto-detect configurations based on the build if GN was
  # used. This can't be overridden by cmd-line arguments.
  options.auto_detect = False
  if options.gn:
    # Pick the most recently built configuration under out.gn.
    gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
    latest_timestamp = -1
    latest_config = None
    for gn_config in os.listdir(gn_out_dir):
      gn_config_dir = os.path.join(gn_out_dir, gn_config)
      if not isdir(gn_config_dir):
        continue
      if os.path.getmtime(gn_config_dir) > latest_timestamp:
        latest_timestamp = os.path.getmtime(gn_config_dir)
        latest_config = gn_config
    if latest_config:
      print(">>> Latest GN build found is %s" % latest_config)
      options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)

  if options.buildbot:
    build_config_path = os.path.join(
        BASE_DIR, options.outdir, options.mode, "v8_build_config.json")
  else:
    build_config_path = os.path.join(
        BASE_DIR, options.outdir, "v8_build_config.json")

  # Auto-detect test configurations based on the build (GN only).
  if os.path.exists(build_config_path):
    try:
      with open(build_config_path) as f:
        build_config = json.load(f)
    except Exception:
      print ("%s exists but contains invalid json. Is your build up-to-date?"
             % build_config_path)
      return False
    options.auto_detect = True

    # In auto-detect mode the outdir is always where we found the build
    # config. This ensures that we'll also take the build products from
    # there.
    options.outdir = os.path.dirname(build_config_path)
    options.arch_and_mode = None
    if options.mode:
      # In auto-detect mode we don't use the mode for more path-magic.
      # Therefore transform the buildbot mode here to fit to the GN build
      # config.
      options.mode = BuildbotToV8Mode(options.mode)

    # In V8 land, GN's x86 is called ia32.
    if build_config["v8_target_cpu"] == "x86":
      build_config["v8_target_cpu"] = "ia32"

    # Update options based on the build config. Sanity check that we're not
    # trying to use inconsistent options.
    for param, value in (
        ('arch', build_config["v8_target_cpu"]),
        ('asan', build_config["is_asan"]),
        ('dcheck_always_on', build_config["dcheck_always_on"]),
        ('gcov_coverage', build_config["is_gcov_coverage"]),
        ('mode', 'debug' if build_config["is_debug"] else 'release'),
        ('msan', build_config["is_msan"]),
        ('no_i18n', not build_config["v8_enable_i18n_support"]),
        ('no_snap', not build_config["v8_use_snapshot"]),
        ('tsan', build_config["is_tsan"]),
        ('ubsan_vptr', build_config["is_ubsan_vptr"])):
      cmd_line_value = getattr(options, param)
      if cmd_line_value not in [None, True, False] and cmd_line_value != value:
        # TODO(machenbach): This is for string options only. Requires options
        # to not have default values. We should make this more modular and
        # implement it in our own version of the option parser.
        print "Attempted to set %s to %s, while build is %s." % (
            param, cmd_line_value, value)
        return False
      if cmd_line_value == True and value == False:
        print "Attempted to turn on %s, but it's not available." % (
            param)
        return False
      if cmd_line_value != value:
        print ">>> Auto-detected %s=%s" % (param, value)
      setattr(options, param, value)

  else:
    # Non-GN build without auto-detect. Set default values for missing
    # parameters.
    if not options.mode:
      options.mode = "release,debug"
    if not options.arch:
      options.arch = "ia32,x64,arm"

  # Architecture and mode related stuff.
  if options.arch_and_mode:
    # "arch.mode,arch.mode,..." overrides the separate --arch/--mode flags.
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if not BuildbotToV8Mode(mode) in MODES:
      print "Unknown mode %s" % mode
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if not arch in SUPPORTED_ARCHS:
      print "Unknown architecture %s" % arch
      return False

  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)

  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = sum(map(shlex.split, options.extra_flags), [])

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")
    options.extra_flags.append("--omit-quit")

  if options.novfp3:
    options.extra_flags.append("--noenable-vfp3")

  if options.exhaustive_variants:
    # This is used on many bots. It includes a larger set of default
    # variants. Other options for manipulating variants still apply
    # afterwards.
    VARIANTS = EXHAUSTIVE_VARIANTS

  # TODO(machenbach): Figure out how to test a bigger subset of variants on
  # msan and tsan.
  if options.msan:
    VARIANTS = ["default"]

  if options.tsan:
    VARIANTS = ["default"]

  if options.j == 0:
    options.j = multiprocessing.cpu_count()

  if options.random_seed_stress_count <= 1 and options.random_seed == 0:
    options.random_seed = RandomSeed()

  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return reduce(lambda x, y: x + y, args) <= 1

  if not excl(options.no_variants, bool(options.variants)):
    print("Use only one of --no-variants or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_variants:
    VARIANTS = ["default"]
  if options.variants:
    VARIANTS = options.variants.split(",")

    # Resolve variant aliases.
    VARIANTS = reduce(
        list.__add__,
        (VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
        [],
    )

    if not set(VARIANTS).issubset(ALL_VARIANTS):
      print "All variants must be in %s" % str(ALL_VARIANTS)
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")

  # Dedupe.
  VARIANTS = list(set(VARIANTS))

  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)

  def CheckTestMode(name, option):
    # Validate the run|skip|dontcare tri-state options.
    if not option in ["run", "skip", "dontcare"]:
      print "Unknown %s mode %s" % (name, option)
      return False
    return True
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if options.no_i18n:
    TEST_MAP["bot_default"].remove("intl")
    TEST_MAP["default"].remove("intl")
  return True


def ShardTests(tests, options):
  """Return the subset of tests belonging to this shard."""
  # Read gtest shard configuration from environment (e.g. set by swarming).
  # If none is present, use values passed on the command line.
  shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS',
                                   options.shard_count))
  shard_run = os.environ.get('GTEST_SHARD_INDEX')
  if shard_run is not None:
    # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
    shard_run = int(shard_run) + 1
  else:
    shard_run = options.shard_run

  if options.shard_count > 1:
    # Log if a value was passed on the cmd line and it differs from the
    # environment variables.
    if options.shard_count != shard_count:
      print("shard_count from cmd line differs from environment variable "
            "GTEST_TOTAL_SHARDS")
    if options.shard_run > 1 and options.shard_run != shard_run:
      print("shard_run from cmd line differs from environment variable "
            "GTEST_SHARD_INDEX")

  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print "shard-run not a valid number, should be in [1:shard-count]"
    print "defaulting back to running all tests"
    return tests
  # Round-robin assignment: test i goes to shard (i % shard_count) + 1.
  count = 0
  shard = []
  for test in tests:
    if count % shard_count == shard_run - 1:
      shard.append(test)
    count += 1
  return shard


def Main():
  """Entry point: parse options and run Execute per (arch, mode) pair."""
  # Use the v8 root as cwd as some test cases use "load" with relative paths.
  os.chdir(BASE_DIR)

  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  SetupEnvironment(options)

  if options.swarming:
    # Swarming doesn't print how isolated commands are called.
    # Lets make this less cryptic by printing it ourselves.
    print ' '.join(sys.argv)

  exit_code = 0

  suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))

  # Use default tests if no test configuration was provided at the cmd line.
  if len(args) == 0:
    args = ["default"]

  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    if name in TEST_MAP:
      return [suite for suite in TEST_MAP[name]]
    else:
      return [name]
  args = reduce(lambda x, y: x + y,
                [ExpandTestGroups(arg) for arg in args],
                [])

  args_suites = OrderedDict() # Used as set
  for arg in args:
    args_suites[arg.split('/')[0]] = True
  suite_paths = [ s for s in args_suites if s in suite_paths ]

  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(BASE_DIR, "test", root))
    if suite:
      suites.append(suite)

  if options.download_data or options.download_data_only:
    for s in suites:
      s.DownloadData()

  if options.download_data_only:
    return exit_code

  for s in suites:
    s.PrepareSources()

  # Run once per (arch, mode) pair; overall exit code is the first failure.
  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites)
    except KeyboardInterrupt:
      return 2
    exit_code = exit_code or code
  return exit_code


def Execute(arch, mode, args, options, suites):
  """Run the given suites for one (arch, mode) configuration."""
  print(">>> Running tests for %s.%s" % (arch, mode))

  # Locate the directory with the built executables.
  shell_dir = options.shell_dir
  if not shell_dir:
    if options.auto_detect:
      # If an output dir with a build was passed, test directly in that
      # directory.
      shell_dir = os.path.join(BASE_DIR, options.outdir)
    elif options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          BASE_DIR,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  if not os.path.exists(shell_dir):
      raise Exception('Could not find shell_dir: "%s"' % shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]

  # Simulators are slow, therefore allow a longer timeout.
  if arch in SLOW_ARCHS:
    options.timeout *= 2

  options.timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    options.timeout *= 2

  ctx = context.Context(arch, MODES[mode]["execution_mode"],
                        shell_dir,
                        mode_flags, options.verbose,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=options.sancov_dir)

  # TODO(all): Combine "simulator" and "simulator_run".
  # TODO(machenbach): In GN we can derive simulator run from
  # target_arch != v8_target_arch in the dumped build config.
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64', 's390', 's390x'] and \
      bool(ARCH_GUESS) and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  # Variables visible to the suites' status files.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": options.gcov_coverage,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "predictable": options.predictable,
    "byteorder": sys.byteorder,
    "no_harness": options.no_harness,
    "ubsan_vptr": options.ubsan_vptr,
  }
  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests

    # First filtering by status applying the generic rules (independent of
    # variants).
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests)

    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    # Expand each test into one copy per variant/flag-set combination.
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [ t.CopyAddingFlags(v, flags)
                      for t in s.tests
                      for v in variant_gen.FilterVariantsByTest(t)
                      for flags in variant_gen.GetFlagSets(t, v) ]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in
          # execution.py) or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    # Second filtering by status applying the variant-dependent rules.
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests, variants=True)

    s.tests = ShardTests(s.tests, options)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.
if options.report: verbose.PrintReport(all_tests) # Run the tests, either locally or distributed on the network. start_time = time.time() progress_indicator = progress.IndicatorNotifier() progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]()) if options.junitout: progress_indicator.Register(progress.JUnitTestProgressIndicator( options.junitout, options.junittestsuite)) if options.json_test_results: progress_indicator.Register(progress.JsonTestProgressIndicator( options.json_test_results, arch, MODES[mode]["execution_mode"], ctx.random_seed)) if options.flakiness_results: progress_indicator.Register(progress.FlakinessTestProgressIndicator( options.flakiness_results)) run_networked = not options.no_network if not run_networked: if options.verbose: print("Network distribution disabled, running tests locally.") elif utils.GuessOS() != "linux": print("Network distribution is only supported on Linux, sorry!") run_networked = False peers = [] if run_networked: peers = network_execution.GetPeers() if not peers: print("No connection to distribution server; running tests locally.") run_networked = False elif len(peers) == 1: print("No other peers on the network; running tests locally.") run_networked = False elif num_tests <= 100: print("Less than 100 tests, running them locally.") run_networked = False if run_networked: runner = network_execution.NetworkedRunner(suites, progress_indicator, ctx, peers, BASE_DIR) else: runner = execution.Runner(suites, progress_indicator, ctx) exit_code = runner.Run(options.j) overall_duration = time.time() - start_time if options.time: verbose.PrintTestDurations(suites, overall_duration) if num_tests == 0: print("Warning: no tests were run!") if exit_code == 1 and options.json_test_results: print("Force exit code 0 after failures. Json test results file generated " "with failure information.") exit_code = 0 if options.sancov_dir: # If tests ran with sanitizer coverage, merge coverage files in the end. 
try: print "Merging sancov files." subprocess.check_call([ sys.executable, join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"), "--coverage-dir=%s" % options.sancov_dir]) except: print >> sys.stderr, "Error: Merging sancov files failed." exit_code = 1 return exit_code if __name__ == "__main__": sys.exit(Main())
# Furniture-placement ("house design") module: panels for picking furniture,
# a movable wrapper around distributed furniture items, and the ObjectManager
# that drives selection, dragging, rotation and collision response.
from direct.directtools.DirectSelection import *
from direct.directtools.DirectUtil import ROUND_TO
from direct.directtools.DirectGeometry import LineNodePath
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from toontown.catalog import CatalogFurnitureItem
from toontown.catalog import CatalogItemTypes
from direct.showbase import PythonUtil
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer

# Camera presets: (pos, lookAt-ish point, focus, offset, flag) tuples consumed
# by localAvatar.setCameraSettings; indexed by camPosList below.
camPos50 = (Point3(0.0, -10.0, 50.0), Point3(0.0, -9.66, 49.06), Point3(0.0, 1.5, 12.38), Point3(0.0, 1.5, -3.1), 1)
camPos40 = (Point3(0.0, -15.0, 40.0), Point3(0.0, -14.5, 39.13), Point3(0.0, 1.5, 12.38), Point3(0.0, 1.5, -3.1), 1)
camPos30 = (Point3(0.0, -20.0, 30.0), Point3(0.0, -19.29, 29.29), Point3(0.0, 1.5, 12.38), Point3(0.0, 1.5, -3.1), 1)
camPos20 = (Point3(0.0, -20.0, 20.0), Point3(0.0, -19.13, 19.5), Point3(0.0, 1.5, 12.38), Point3(0.0, 1.5, -3.1), 1)
# Ordered from closest zoom to farthest; zoomCamIn/zoomCamOut step this list.
camPosList = [camPos20, camPos30, camPos40, camPos50]
DEFAULT_CAM_INDEX = 2

# Panel background colors for the furniture picker buttons (RGBA).
NormalPickerPanelColor = (1, 0.9, 0.745, 1)
DisabledPickerPanelColor = (0.7, 0.65, 0.58, 1)
DeletePickerPanelColor = (1, 0.4, 0.4, 1)
DisabledDeletePickerPanelColor = (0.7, 0.3, 0.3, 1)


class FurnitureItemPanel(DirectButton):
    """A clickable picker panel showing one catalog furniture item."""

    def __init__(self, item, itemId, command = None, deleteMode = 0, withinFunc = None, helpCategory = None):
        self.item = item
        self.itemId = itemId
        self.command = command
        self.origHelpCategory = helpCategory
        self.deleteMode = deleteMode
        # Delete mode shows the red panel variant.
        if self.deleteMode:
            framePanelColor = DeletePickerPanelColor
        else:
            framePanelColor = NormalPickerPanelColor
        DirectButton.__init__(self, relief=DGG.RAISED, frameSize=(-0.25, 0.25, -0.2, 0.2), frameColor=framePanelColor, borderWidth=(0.02, 0.02), command=self.clicked)
        if self.deleteMode:
            helpCategory = 'FurnitureItemPanelDelete'
        self.bindHelpText(helpCategory)
        if withinFunc:
            # Report mouse-over of this panel to the owner by item id.
            self.bind(DGG.WITHIN, lambda event: withinFunc(self.itemId))
        self.initialiseoptions(FurnitureItemPanel)
        self.load()

    def show(self):
        # NOTE(review): calls DirectFrame.show although the base class is
        # DirectButton — works because DirectButton derives from DirectFrame.
        DirectFrame.show(self)
        if self.ival:
            self.ival.resume()

    def hide(self):
        DirectFrame.hide(self)
        if self.ival:
            self.ival.pause()

    def load(self):
        """Build the picture (if any), its animation interval and name label."""
        panelWidth = 7
        panelCenter = 0
        self.picture, self.ival = self.item.getPicture(base.localAvatar)
        if self.picture:
            self.picture.reparentTo(self)
            self.picture.setScale(0.14)
            self.picture.setPos(0, 0, -0.02)
            text = self.item.getName()
            text_pos = (0, -0.1, 0)
        else:
            # No picture available: show a longer textual description instead.
            text = self.item.getTypeName() + ': ' + self.item.getName()
            text_pos = (0, -0.3, 0)
        if self.ival:
            # Start looping then immediately pause: leaves the interval parked
            # on its first frame until show() resumes it.
            self.ival.loop()
            self.ival.pause()
        self.nameLabel = DirectLabel(parent=self, relief=None, pos=(0, 0, 0.17), scale=0.45, text=text, text_scale=0.15, text_fg=(0, 0, 0, 1), text_pos=text_pos, text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=panelWidth)
        return

    def clicked(self):
        # Forward the click to the owner's callback with our item identity.
        self.command(self.item, self.itemId)

    def unload(self):
        """Release the item's picture resources and drop references."""
        if self.item.hasPicture:
            self.item.cleanupPicture()
        del self.item
        self.nameLabel.destroy()
        del self.nameLabel
        if self.ival:
            self.ival.finish()
        del self.ival
        del self.picture
        self.command = None
        return

    def destroy(self):
        self.unload()
        DirectButton.destroy(self)

    def bindHelpText(self, category):
        """Rebind hover help text for the given category (None = original)."""
        self.unbind(DGG.ENTER)
        self.unbind(DGG.EXIT)
        if category is None:
            category = self.origHelpCategory
        self.bind(DGG.ENTER, base.cr.objectManager.showHelpText, extraArgs=[category, self.item.getName()])
        self.bind(DGG.EXIT, base.cr.objectManager.hideHelpText)
        return

    def setDeleteMode(self, deleteMode):
        self.deleteMode = deleteMode
        self.__updateAppearance()

    def enable(self, enabled):
        # Toggle the DirectGui state and refresh colors to match.
        if enabled:
            self['state'] = DGG.NORMAL
        else:
            self['state'] = DGG.DISABLED
        self.__updateAppearance()

    def __updateAppearance(self):
        # Pick frame color from the (deleteMode, disabled) combination.
        # NOTE(review): `relief` is computed here but never applied — only
        # frameColor is set below.
        color = NormalPickerPanelColor
        relief = DGG.RAISED
        if self.deleteMode:
            if self['state'] == DGG.DISABLED:
                color = DisabledDeletePickerPanelColor
                relief = DGG.SUNKEN
            else:
                color = DeletePickerPanelColor
                relief = DGG.RAISED
        elif self['state'] == DGG.DISABLED:
            color = DisabledPickerPanelColor
            relief = DGG.SUNKEN
        else:
            color = NormalPickerPanelColor
            relief = DGG.RAISED
        self['frameColor'] = color


class MovableObject(NodePath, DirectObject):
    """Wraps a DistributedFurnitureItem so it can be dragged around the room.

    Computes the item's footprint corners (c0..c3), drag point, radius and a
    side/top collision box used while the player moves it.
    """

    def __init__(self, dfitem, parent = render):
        NodePath.__init__(self)
        self.assign(dfitem)
        self.dfitem = dfitem
        dfitem.transmitRelativeTo = dfitem.getParent()
        self.reparentTo(parent)
        # Tag lets picking code recognize this node as user-movable.
        self.setTag('movableObject', '1')
        self.builtInCNodes = self.findAllMatches('**/+CollisionNode')
        self.numBuiltInNodes = self.builtInCNodes.getNumPaths()
        # Hide the model's own collision nodes and shadows while measuring
        # the tight bounds below, so they don't distort the box.
        self.stashBuiltInCollisionNodes()
        shadows = self.findAllMatches('**/*shadow*')
        shadows.addPathsFrom(self.findAllMatches('**/*Shadow*'))
        shadows.stash()
        # Catalog flags decide where the item may be placed.
        flags = self.dfitem.item.getFlags()
        if flags & CatalogFurnitureItem.FLPainting:
            self.setOnFloor(0)
            self.setOnWall(1)
        else:
            self.setOnFloor(1)
            self.setOnWall(0)
        if flags & CatalogFurnitureItem.FLOnTable:
            self.setOnTable(1)
        else:
            self.setOnTable(0)
        if flags & CatalogFurnitureItem.FLRug:
            self.setIsRug(1)
        else:
            self.setIsRug(0)
        if flags & CatalogFurnitureItem.FLIsTable:
            self.setIsTable(1)
        else:
            self.setIsTable(0)
        # Measure bounds in the identity pose, then restore the transform.
        m = self.getTransform()
        self.iPosHpr()
        bMin, bMax = self.bounds = self.getTightBounds()
        bMin -= Vec3(0.1, 0.1, 0)
        bMax += Vec3(0.1, 0.1, 0)
        # Footprint corners at a small height above the floor.
        self.c0 = Point3(bMin[0], bMin[1], 0.2)
        self.c1 = Point3(bMax[0], bMin[1], 0.2)
        self.c2 = Point3(bMax[0], bMax[1], 0.2)
        self.c3 = Point3(bMin[0], bMax[1], 0.2)
        self.center = (bMin + bMax) / 2.0
        if flags & CatalogFurnitureItem.FLPainting:
            # Paintings hang from their back face; drag by the wall side.
            self.dragPoint = Vec3(self.center[0], bMax[1], self.center[2])
        else:
            self.dragPoint = Vec3(self.center[0], self.center[1], bMin[2])
        delta = self.dragPoint - self.c0
        self.radius = min(delta[0], delta[1])
        if self.getOnWall():
            self.setWallOffset(0.1)
        else:
            self.setWallOffset(self.radius + 0.1)
        self.makeCollisionBox()
        self.setTransform(m)
        self.unstashBuiltInCollisionNodes()
        shadows.unstash()

    def resetMovableObject(self):
        """Undo the movable decoration: restore collisions, drop the tag."""
        self.unstashBuiltInCollisionNodes()
        self.collisionNodePath.removeNode()
        self.clearTag('movableObject')

    def setOnFloor(self, fOnFloor):
        self.fOnFloor = fOnFloor

    def getOnFloor(self):
        return self.fOnFloor

    def setOnWall(self, fOnWall):
        self.fOnWall = fOnWall

    def getOnWall(self):
        return self.fOnWall

    def setOnTable(self, fOnTable):
        self.fOnTable = fOnTable

    def getOnTable(self):
        return self.fOnTable

    def setIsRug(self, fIsRug):
        self.fIsRug = fIsRug

    def getIsRug(self):
        return self.fIsRug

    def setIsTable(self, fIsTable):
        self.fIsTable = fIsTable

    def getIsTable(self):
        return self.fIsTable

    def setWallOffset(self, offset):
        self.wallOffset = offset

    def getWallOffset(self):
        return self.wallOffset

    def destroy(self):
        self.removeNode()

    def stashBuiltInCollisionNodes(self):
        self.builtInCNodes.stash()

    def unstashBuiltInCollisionNodes(self):
        self.builtInCNodes.unstash()

    def getFloorBitmask(self):
        # Items allowed on tables can also land on table tops.
        if self.getOnTable():
            return ToontownGlobals.FloorBitmask | ToontownGlobals.FurnitureTopBitmask
        else:
            return ToontownGlobals.FloorBitmask

    def getWallBitmask(self):
        # Rugs and wall items slide along other furniture; everything else
        # also collides with furniture sides.
        if self.getIsRug() or self.getOnWall():
            return ToontownGlobals.WallBitmask
        else:
            return ToontownGlobals.WallBitmask | ToontownGlobals.FurnitureSideBitmask

    def makeCollisionBox(self):
        """Build side (and, for tables, top) collision polygons around bounds."""
        self.collisionNodePath = self.attachNewNode('furnitureCollisionNode')
        if self.getIsRug() or self.getOnWall():
            # Rugs and wall items don't block other furniture.
            return
        # Slightly inflate the XY footprint so adjacent items can't overlap.
        mx = self.bounds[0][0] - 0.01
        Mx = self.bounds[1][0] + 0.01
        my = self.bounds[0][1] - 0.01
        My = self.bounds[1][1] + 0.01
        mz = self.bounds[0][2]
        Mz = self.bounds[1][2]
        cn = CollisionNode('sideCollisionNode')
        cn.setIntoCollideMask(ToontownGlobals.FurnitureSideBitmask)
        self.collisionNodePath.attachNewNode(cn)
        # Four vertical side walls of the box.
        cp = CollisionPolygon(Point3(mx, My, mz), Point3(mx, my, mz), Point3(mx, my, Mz), Point3(mx, My, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(Mx, my, mz), Point3(Mx, My, mz), Point3(Mx, My, Mz), Point3(Mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(mx, my, mz), Point3(Mx, my, mz), Point3(Mx, my, Mz), Point3(mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(Mx, My, mz), Point3(mx, My, mz), Point3(mx, My, Mz), Point3(Mx, My, Mz))
        cn.addSolid(cp)
        if self.getIsTable():
            # Tables also get a top surface other items can rest on.
            cn = CollisionNode('topCollisionNode')
            cn.setIntoCollideMask(ToontownGlobals.FurnitureTopBitmask)
            self.collisionNodePath.attachNewNode(cn)
            cp = CollisionPolygon(Point3(mx, my, Mz), Point3(Mx, my, Mz), Point3(Mx, My, Mz), Point3(mx, My, Mz))
            cn.addSolid(cp)


class ObjectManager(NodePath, DirectObject):
    """Drives furniture-mode editing: selection, dragging, rotation and GUI."""
    notify = DirectNotifyGlobal.directNotify.newCategory('ObjectManager')

    def __init__(self):
        NodePath.__init__(self)
        self.assign(render.attachNewNode('objectManager'))
        self.objectDict = {}
        self.selectedObject = None
        self.movingObject = 0
        self.deselectEvent = None
        # Pose snapshot used to revert an invalid move.
        self.startPose = render.attachNewNode('startPose')
        # Drag hierarchy: dragPoint -> gridSnap -> collisionResponse; the
        # moved object is temporarily reparented under collisionResponse.
        self.dragPointNP = self.attachNewNode('dragPoint')
        self.gridSnapNP = self.dragPointNP.attachNewNode('gridSnap')
        self.collisionOffsetNP = self.gridSnapNP.attachNewNode('collisionResponse')
        self.iRay = SelectionRay()
        self.iSegment = SelectionSegment(numSegments=6)
        self.iSegment4 = SelectionSegment(numSegments=4)
        self.iSphere = SelectionSphere()
        self.houseExtents = None
        self.doorBlocker = None
        # Large horizontal plane the drag ray hits while moving furniture.
        cp = CollisionPolygon(Point3(-100, -100, 0), Point3(100, -100, 0), Point3(100, 100, 0), Point3(-100, 100, 0))
        cn = CollisionNode('dragCollisionNode')
        cn.addSolid(cp)
        cn.setIntoCollideMask(ToontownGlobals.FurnitureDragBitmask)
        self.collisionNP = NodePath(cn)
        # Outline drawn around the selected object's footprint.
        self.lnp = LineNodePath()
        self.fRecenter = 0
        self.gridSpacing = None
        self.firstTime = 0
        guiModels = loader.loadModel('phase_5.5/models/gui/house_design_gui')
        self.createSelectedObjectPanel(guiModels)
        self.createMainControls(guiModels)
        self.furnitureManager = None
        self.atticPicker = None
        self.inRoomPicker = None
        self.inTrashPicker = None
        self.dialog = None
        self.deleteMode = 0
        self.nonDeletableItem = None
        self.verifyFrame = None
        self.deleteItemText = None
        self.okButton = None
        self.cancelButton = None
        self.itemIval = None
        self.itemPanel = None
        self.guiInterval = None
        self.accept('enterFurnitureMode', self.enterFurnitureMode)
        self.accept('exitFurnitureMode', self.exitFurnitureMode)
        return

    def enterFurnitureMode(self, furnitureManager, fDirector):
        """Begin editing: hook up the manager, build pickers, move the camera.

        fDirector is falsy when this avatar is not the one directing the
        edit; in that case just make sure we're out of furniture mode.
        """
        if not fDirector:
            if self.furnitureManager:
                self.exitFurnitureMode(self.furnitureManager)
            return
        if furnitureManager == self.furnitureManager:
            return
        if self.furnitureManager != None:
            # Switching managers: leave the old one cleanly first.
            self.exitFurnitureMode(self.furnitureManager)
        self.notify.info('enterFurnitureMode, fDirector = %s' % fDirector)
        self.furnitureManager = furnitureManager
        self.furnitureManager.d_avatarEnter()
        house = furnitureManager.getInteriorObject()
        house.hideExteriorWindows()
        self.setTargetNodePath(house.interior)
        self.createAtticPicker()
        self.initializeDistributedFurnitureItems(furnitureManager.dfitems)
        self.setCamPosIndex(DEFAULT_CAM_INDEX)
        base.localAvatar.setGhostMode(1)
        taskMgr.remove('editModeTransition')
        self.orientCamH(base.localAvatar.getH(self.targetNodePath))
        self.accept('mouse1', self.moveObjectStart)
        self.accept('mouse1-up', self.moveObjectStop)
        self.furnitureGui.show()
        self.deleteMode = 0
        self.__updateDeleteButtons()
        self.showAtticPicker()
        base.localAvatar.laffMeter.stop()
        base.setCellsAvailable(base.leftCells + [base.bottomCells[0]], 0)
        if self.guiInterval:
            self.guiInterval.finish()
        # Slide/scale the furniture GUI into place.
        self.guiInterval = self.furnitureGui.posHprScaleInterval(1.0, Point3(-1.16, 1, -0.03), Vec3(0), Vec3(0.06), startPos=Point3(-1.19, 1, 0.33), startHpr=Vec3(0), startScale=Vec3(0.04), blendType='easeInOut', name='lerpFurnitureButton')
        self.guiInterval.start()
        taskMgr.add(self.recenterButtonFrameTask, 'recenterButtonFrameTask', 10)
        messenger.send('wakeup')
        return

    def exitFurnitureMode(self, furnitureManager):
        """Tear down furniture-mode state and restore the normal avatar UI."""
        if furnitureManager != self.furnitureManager:
            return
        self.notify.info('exitFurnitureMode')
        house = furnitureManager.getInteriorObject()
        if house:
            house.showExteriorWindows()
        self.furnitureManager.d_avatarExit()
        self.furnitureManager = None
        base.localAvatar.setCameraPositionByIndex(0)
        self.exitDeleteMode()
        self.houseExtents.detachNode()
        self.doorBlocker.detachNode()
        self.deselectObject()
        self.ignore('mouse1')
        self.ignore('mouse1-up')
        if self.atticPicker:
            self.atticPicker.destroy()
            self.atticPicker = None
        if self.inRoomPicker:
            self.inRoomPicker.destroy()
            self.inRoomPicker = None
        if self.inTrashPicker:
            self.inTrashPicker.destroy()
            self.inTrashPicker = None
        self.__cleanupVerifyDelete()
        self.furnitureGui.hide()
        base.setCellsAvailable(base.leftCells + [base.bottomCells[0]], 1)
        base.localAvatar.laffMeter.start()
        taskMgr.remove('recenterButtonFrameTask')
        self.cleanupDialog()
        taskMgr.remove('showHelpTextDoLater')
        messenger.send('wakeup')
        return

    def initializeDistributedFurnitureItems(self, dfitems):
        """Wrap every distributed furniture item in a MovableObject."""
        self.objectDict = {}
        for item in dfitems:
            mo = MovableObject(item, parent=self.targetNodePath)
            self.objectDict[mo.id()] = mo

    def setCamPosIndex(self, index):
        self.camPosIndex = index
        base.localAvatar.setCameraSettings(camPosList[index])

    def zoomCamIn(self):
        self.setCamPosIndex(max(0, self.camPosIndex - 1))
        messenger.send('wakeup')

    def zoomCamOut(self):
        self.setCamPosIndex(min(len(camPosList) - 1, self.camPosIndex + 1))
        messenger.send('wakeup')

    def rotateCamCW(self):
        self.orientCamH(base.localAvatar.getH(self.targetNodePath) - 90)
        messenger.send('wakeup')

    def rotateCamCCW(self):
        self.orientCamH(base.localAvatar.getH(self.targetNodePath) + 90)
        messenger.send('wakeup')

    def orientCamH(self, toonH):
        # Ease the avatar's heading to the nearest 90-degree orientation.
        targetH = ROUND_TO(toonH, 90)
        base.localAvatar.hprInterval(duration=1, hpr=Vec3(targetH, 0, 0), other=self.targetNodePath, blendType='easeInOut', name='editModeTransition').start()

    def setTargetNodePath(self, nodePath):
        """Point the manager at a room interior and rebuild its boundaries."""
        self.targetNodePath = nodePath
        if self.houseExtents:
            self.houseExtents.removeNode()
        if self.doorBlocker:
            self.doorBlocker.removeNode()
        self.makeHouseExtentsBox()
        self.makeDoorBlocker()
        self.collisionNP.reparentTo(self.targetNodePath)

    def loadObject(self, filename):
        """Create, register and select a new movable object."""
        mo = MovableObject(filename, parent=self.targetNodePath)
        self.objectDict[mo.id()] = mo
        self.selectObject(mo)
        return mo

    def pickObject(self):
        """Ray-pick under the mouse; select a movable object or deselect."""
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickGeom(targetNodePath=self.targetNodePath, skipFlags=SKIP_ALL)
        if entry:
            nodePath = entry.getIntoNodePath()
            if self.isMovableObject(nodePath):
                self.selectObject(self.findObject(nodePath))
                return
        self.deselectObject()

    def pickInRoom(self, objectId):
        # Selection initiated from the in-room picker panel.
        self.selectObject(self.objectDict.get(objectId))

    def selectObject(self, selectedObject):
        """Make `selectedObject` current: outline it and show move controls."""
        messenger.send('wakeup')
        if self.selectedObject:
            self.deselectObject()
        if selectedObject:
            self.selectedObject = selectedObject
            # Deselect automatically if the distributed item goes away.
            self.deselectEvent = self.selectedObject.dfitem.uniqueName('disable')
            self.acceptOnce(self.deselectEvent, self.deselectObject)
            # Draw the footprint rectangle around the object.
            self.lnp.reset()
            self.lnp.reparentTo(selectedObject)
            self.lnp.moveTo(selectedObject.c0)
            self.lnp.drawTo(selectedObject.c1)
            self.lnp.drawTo(selectedObject.c2)
            self.lnp.drawTo(selectedObject.c3)
            self.lnp.drawTo(selectedObject.c0)
            self.lnp.create()
            self.buttonFrame.show()
            self.enableButtonFrameTask()
            self.sendToAtticButton.show()
            self.atticRoof.hide()

    def deselectObject(self):
        """Clear the current selection and hide the move controls."""
        self.moveObjectStop()
        if self.deselectEvent:
            self.ignore(self.deselectEvent)
            self.deselectEvent = None
        self.selectedObject = None
        self.lnp.detachNode()
        self.buttonFrame.hide()
        self.disableButtonFrameTask()
        self.sendToAtticButton.hide()
        self.atticRoof.show()
        return

    def isMovableObject(self, nodePath):
        return nodePath.hasNetTag('movableObject')

    def findObject(self, nodePath):
        """Map a picked node back to its MovableObject (or None)."""
        np = nodePath.findNetTag('movableObject')
        if np.isEmpty():
            return None
        else:
            return self.objectDict.get(np.id(), None)
        return None  # unreachable; kept from original

    def moveObjectStop(self, *args):
        """Finish a drag: restore parenting, collisions and the grab cursor."""
        if self.movingObject:
            self.movingObject = 0
            taskMgr.remove('moveObjectTask')
            if self.selectedObject:
                # Keep the world-space pose while reparenting back to the room.
                self.selectedObject.wrtReparentTo(self.targetNodePath)
                self.selectedObject.collisionNodePath.unstash()
                self.selectedObject.dfitem.stopAdjustPosHpr()
            for object in self.objectDict.values():
                object.unstashBuiltInCollisionNodes()
            self.centerMarker['image'] = [self.grabUp, self.grabDown, self.grabRollover]
            self.centerMarker.configure(text=['', TTLocalizer.HDMoveLabel], text_pos=(0, 1), text_scale=0.7, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image_scale=0.3)

    def moveObjectStart(self):
        self.moveObjectStop()
        self.pickObject()
        self.moveObjectContinue()

    def moveObjectContinue(self, *args):
        """Begin dragging the selected object under the mouse task."""
        messenger.send('wakeup')
        if self.selectedObject:
            # Disable all furniture collisions while dragging; the drag plane
            # and wall/floor picks do the positioning instead.
            for object in self.objectDict.values():
                object.stashBuiltInCollisionNodes()
            self.selectedObject.collisionNodePath.stash()
            self.selectedObject.dfitem.startAdjustPosHpr()
            self.firstTime = 1
            self.iPosHpr()
            self.startPoseValid = 0
            self.centerMarker['image'] = self.grabDown
            self.centerMarker.configure(text=TTLocalizer.HDMoveLabel, text_pos=(0, 1), text_scale=0.7, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image_scale=0.3)
            taskMgr.add(self.moveObjectTask, 'moveObjectTask')
            self.movingObject = 1

    def setLnpColor(self, r, g, b):
        # Recolor the five outline segments (four edges + closing segment).
        for i in range(5):
            self.lnp.lineSegs.setVertexColor(i, r, g, b)

    def markNewPosition(self, isValid):
        # Invalid spot: snap back to the last good pose (if one exists).
        # Valid spot: remember that startPose now holds a good pose.
        if not isValid:
            if self.startPoseValid:
                self.collisionOffsetNP.setPosHpr(self.startPose, self.selectedObject.dragPoint, Vec3(0))
        else:
            self.startPoseValid = 1

    def moveObjectTask(self, state):
        """Per-frame drag task: follow the mouse, snap to walls/floor, test collisions."""
        so = self.selectedObject
        target = self.targetNodePath
        # Snapshot the current pose so an invalid move can be reverted.
        self.startPose.iPosHpr(so)
        # Project the mouse onto the big drag plane.
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickBitMask(bitMask=ToontownGlobals.FurnitureDragBitmask, targetNodePath=target, skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            return Task.cont
        self.setPos(base.cam, entry.getSurfacePoint(base.cam))
        if self.firstTime:
            self.moveObjectInit()
            self.firstTime = 0
        else:
            self.gridSnapNP.iPos()
            self.collisionOffsetNP.iPosHpr()
        if self.gridSpacing:
            # Optional grid snapping in the room's XY plane.
            pos = self.dragPointNP.getPos(target)
            self.gridSnapNP.setPos(target, ROUND_TO(pos[0], self.gridSpacing), ROUND_TO(pos[1], self.gridSpacing), pos[2])
        # Cast toward the walls to align wall-mounted / wall-adjacent items.
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickBitMask3D(bitMask=so.getWallBitmask(), targetNodePath=target, dir=Vec3(self.getNearProjectionPoint(self.gridSnapNP)), skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        fWall = 0
        if not so.getOnTable():
            while entry:
                intoMask = entry.getIntoNodePath().node().getIntoCollideMask()
                # Snap to the closest 90-degree heading only against real walls.
                fClosest = (intoMask & ToontownGlobals.WallBitmask).isZero()
                if self.alignObject(entry, target, fClosest=fClosest):
                    fWall = 1
                    break
                entry = self.iRay.findNextCollisionEntry(skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if so.getOnWall():
            # Wall items live or die by the wall hit alone.
            self.markNewPosition(fWall)
            return Task.cont
        # Drop a ray straight down to find the supporting floor/table surface.
        self.iRay.setParentNP(target)
        entry = self.iRay.pickBitMask3D(bitMask=so.getFloorBitmask(), targetNodePath=target, origin=Point3(self.gridSnapNP.getPos(target) + Vec3(0, 0, 10)), dir=Vec3(0, 0, -1), skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            self.markNewPosition(0)
            return Task.cont
        nodePath = entry.getIntoNodePath()
        if self.isMovableObject(nodePath):
            self.gridSnapNP.setPos(target, Point3(entry.getSurfacePoint(target)))
        else:
            self.gridSnapNP.setPos(target, Point3(entry.getSurfacePoint(target) + Vec3(0, 0, ToontownGlobals.FloorOffset)))
        if not fWall:
            # Not against a wall: probe nearby walls with a sphere and align.
            self.iSphere.setParentNP(self.gridSnapNP)
            self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
            entry = self.iSphere.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=target, skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
            if entry:
                self.alignObject(entry, target, fClosest=1)
        isValid = self.collisionTest()
        self.markNewPosition(isValid)
        return Task.cont

    def collisionTest(self):
        """Resolve footprint collisions; return 1 if the pose is acceptable.

        First accumulates up to two orthogonal push-out offsets from the
        footprint segments, then (if still colliding) tries pushing back
        along the movement direction.
        """
        so = self.selectedObject
        target = self.targetNodePath
        entry = self.segmentCollision()
        if not entry:
            return 1
        # Keep the largest offset per collided-into solid.
        offsetDict = {}
        while entry:
            offset = self.computeSegmentOffset(entry)
            if offset:
                eid = entry.getInto()
                maxOffsetVec = offsetDict.get(eid, Vec3(0))
                if offset.length() > maxOffsetVec.length():
                    maxOffsetVec.assign(offset)
                offsetDict[eid] = maxOffsetVec
            entry = self.iSegment.findNextCollisionEntry(skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        if offsetDict:
            # Combine offsets into at most two orthogonal components.
            keys = offsetDict.keys()
            ortho1 = offsetDict[keys[0]]
            ortho2 = Vec3(0)
            v1 = Vec3(ortho1)
            v1.normalize()
            for key in keys[1:]:
                offset = offsetDict[key]
                v2 = Vec3(offset)
                v2.normalize()
                dp = v1.dot(v2)
                if abs(dp) > 0.95:
                    # Nearly parallel to ortho1: keep the longer one.
                    if offset.length() > ortho1.length():
                        ortho1.assign(offset)
                elif abs(dp) < 0.05:
                    # Nearly perpendicular: track it as the second axis.
                    if offset.length() > ortho2.length():
                        ortho2.assign(offset)
                else:
                    # Oblique: split into components along/perpendicular to ortho1.
                    o1Len = ortho1.length()
                    parallelVec = Vec3(ortho1 * offset.dot(ortho1) / (o1Len * o1Len))
                    perpVec = Vec3(offset - parallelVec)
                    if parallelVec.length() > o1Len:
                        ortho1.assign(parallelVec)
                    if perpVec.length() > ortho2.length():
                        ortho2.assign(perpVec)
            totalOffset = ortho1 + ortho2
            self.collisionOffsetNP.setPos(self.collisionOffsetNP, totalOffset)
            if not self.segmentCollision():
                return 1
        # Still colliding: push back along this frame's movement vector.
        m = self.startPose.getMat(so)
        deltaMove = Vec3(m.getRow3(3))
        if deltaMove.length() == 0:
            return 1
        self.iSegment4.setParentNP(so)
        entry = self.iSegment4.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=target, endPointList=[(so.c0, Point3(m.xformPoint(so.c0))), (so.c1, Point3(m.xformPoint(so.c1))), (so.c2, Point3(m.xformPoint(so.c2))), (so.c3, Point3(m.xformPoint(so.c3)))], skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        maxLen = 0
        maxOffset = None
        while entry:
            offset = Vec3(entry.getSurfacePoint(entry.getFromNodePath()) - entry.getFrom().getPointA())
            offsetLen = Vec3(offset).length()
            if offsetLen > maxLen:
                maxLen = offsetLen
                maxOffset = offset
            entry = self.iSegment4.findNextCollisionEntry(skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        if maxOffset:
            self.collisionOffsetNP.setPos(self.collisionOffsetNP, maxOffset)
            if not self.segmentCollision():
                return 1
        return 0

    def segmentCollision(self):
        """Test the footprint edges and diagonals against walls; return first hit."""
        so = self.selectedObject
        self.iSegment.setParentNP(so)
        entry = self.iSegment.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=self.targetNodePath, endPointList=[(so.c0, so.c1), (so.c1, so.c2), (so.c2, so.c3), (so.c3, so.c0), (so.c0, so.c2), (so.c1, so.c3)], skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        return entry

    def computeSegmentOffset(self, entry):
        """Return a push-out vector for one segment collision, or None."""
        fromNodePath = entry.getFromNodePath()
        if entry.hasSurfaceNormal():
            normal = entry.getSurfaceNormal(fromNodePath)
        else:
            return None
        hitPoint = entry.getSurfacePoint(fromNodePath)
        # Express hit point/normal relative to the drag's starting pose.
        m = self.selectedObject.getMat(self.startPose)
        hp = Point3(m.xformPoint(hitPoint))
        hpn = Vec3(m.xformVec(normal))
        hitPointVec = Vec3(hp - self.selectedObject.dragPoint)
        if hitPointVec.dot(hpn) > 0:
            # Surface faces away from the drag point: no push-out needed.
            return None
        nLen = normal.length()
        # Project segment-endpoint penetration onto the surface normal;
        # 1.01 adds a tiny margin so the retest clears the surface.
        offsetVecA = hitPoint - entry.getFrom().getPointA()
        offsetA = normal * offsetVecA.dot(normal) / (nLen * nLen)
        if offsetA.dot(normal) > 0:
            return offsetA * 1.01
        else:
            offsetVecB = hitPoint - entry.getFrom().getPointB()
            offsetB = normal * offsetVecB.dot(normal) / (nLen * nLen)
            return offsetB * 1.01
        return None  # unreachable; kept from original

    def alignObject(self, entry, target, fClosest = 0, wallOffset = None):
        """Rotate/offset the drag node to sit flush against a hit wall.

        Returns 1 if the entry was a (near-)vertical surface and alignment
        was applied, else 0.
        """
        if not entry.hasSurfaceNormal():
            return 0
        normal = entry.getSurfaceNormal(target)
        # Only align against near-vertical surfaces (normal ~ horizontal).
        if abs(normal.dot(Vec3(0, 0, 1))) < 0.1:
            tempNP = target.attachNewNode('temp')
            normal.setZ(0)
            normal.normalize()
            lookAtNormal = Point3(normal)
            lookAtNormal *= -1
            tempNP.lookAt(lookAtNormal)
            realAngle = ROUND_TO(self.gridSnapNP.getH(tempNP), 90.0)
            if fClosest:
                angle = realAngle
            else:
                angle = 0
            self.gridSnapNP.setHpr(tempNP, angle, 0, 0)
            hitPoint = entry.getSurfacePoint(target)
            tempNP.setPos(hitPoint)
            if wallOffset == None:
                wallOffset = self.selectedObject.getWallOffset()
            # Back the drag point off the wall by the object's wall offset.
            self.gridSnapNP.setPos(tempNP, 0, -wallOffset, 0)
            tempNP.removeNode()
            if realAngle == 180.0:
                self.gridSnapNP.setH(self.gridSnapNP.getH() + 180.0)
            return 1
        return 0

    def rotateLeft(self):
        """Rotate the selected item 22.5 degrees counter-clockwise."""
        if not self.selectedObject:
            return
        so = self.selectedObject
        so.dfitem.startAdjustPosHpr()
        self.iPosHpr(so)
        self.moveObjectInit()
        if so.getOnWall():
            # Wall items roll around their mounting axis instead of heading.
            startR = self.gridSnapNP.getR()
            newR = ROUND_TO(startR + 22.5, 22.5)
            self.gridSnapNP.setR(newR)
        else:
            startH = self.gridSnapNP.getH(self.targetNodePath)
            newH = ROUND_TO(startH - 22.5, 22.5)
            # Skip the rotation if a wall is within reach of the footprint.
            self.iSphere.setParentNP(self.gridSnapNP)
            self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
            entry = self.iSphere.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=self.targetNodePath, skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
            if not entry:
                self.gridSnapNP.setHpr(self.targetNodePath, newH, 0, 0)
            self.collisionTest()
        so.wrtReparentTo(self.targetNodePath)
        self.disableButtonFrameTask()
        so.dfitem.stopAdjustPosHpr()

    def rotateRight(self):
        """Rotate the selected item 22.5 degrees clockwise."""
        if not self.selectedObject:
            return
        so = self.selectedObject
        so.dfitem.startAdjustPosHpr()
        self.iPosHpr(so)
        self.moveObjectInit()
        if so.getOnWall():
            startR = self.gridSnapNP.getR()
            newR = ROUND_TO(startR - 22.5, 22.5)
            self.gridSnapNP.setR(newR)
        else:
            startH = self.gridSnapNP.getH(self.targetNodePath)
            # NOTE(review): this side normalizes with % 360.0 while rotateLeft
            # does not — presumably intentional asymmetry; confirm.
            newH = ROUND_TO(startH + 22.5, 22.5) % 360.0
            self.iSphere.setParentNP(self.gridSnapNP)
            self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
            entry = self.iSphere.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=self.targetNodePath, skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
            if not entry:
                self.gridSnapNP.setHpr(self.targetNodePath, newH, 0, 0)
            self.collisionTest()
        so.wrtReparentTo(self.targetNodePath)
        self.disableButtonFrameTask()
        so.dfitem.stopAdjustPosHpr()

    def moveObjectInit(self):
        # Position the drag hierarchy at the object's drag point and hand
        # the object to the collision-response node.
        self.dragPointNP.setPosHpr(self.selectedObject, self.selectedObject.dragPoint, Vec3(0))
        self.gridSnapNP.iPosHpr()
        self.collisionOffsetNP.iPosHpr()
        self.selectedObject.wrtReparentTo(self.collisionOffsetNP)

    def resetFurniture(self):
        """Strip movable decoration from all objects and clear the registry."""
        for o in self.objectDict.values():
            o.resetMovableObject()
        self.objectDict = {}
        self.deselectObject()
        self.buttonFrame.hide()

    def destroy(self):
        """Full teardown of the manager, its GUI and scene-graph nodes."""
        self.ignore('enterFurnitureMode')
        self.ignore('exitFurnitureMode')
        if self.guiInterval:
            self.guiInterval.finish()
        if self.furnitureManager:
            self.exitFurnitureMode(self.furnitureManager)
        self.cleanupDialog()
        self.resetFurniture()
        self.buttonFrame.destroy()
        self.furnitureGui.destroy()
        if self.houseExtents:
            self.houseExtents.removeNode()
        if self.doorBlocker:
            self.doorBlocker.removeNode()
        self.removeNode()
        if self.verifyFrame:
            self.verifyFrame.destroy()
            self.verifyFrame = None
            self.deleteItemText = None
            self.okButton = None
            self.cancelButton = None
        return

    def createSelectedObjectPanel(self, guiModels):
        """Build the grab/rotate button cluster shown over a selected item."""
        self.buttonFrame = DirectFrame(scale=0.5)
        self.grabUp = guiModels.find('**/handup')
        self.grabDown = guiModels.find('**/handdown')
        self.grabRollover = guiModels.find('**/handrollover')
        # Central "grab" hand: press starts the drag, release ends it.
        self.centerMarker = DirectButton(parent=self.buttonFrame, text=['', TTLocalizer.HDMoveLabel], text_pos=(0, 1), text_scale=0.7, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image=[self.grabUp, self.grabDown, self.grabRollover], image_scale=0.3, relief=None, scale=0.12)
        self.centerMarker.bind(DGG.B1PRESS, self.moveObjectContinue)
        self.centerMarker.bind(DGG.B1RELEASE, self.moveObjectStop)
        guiCCWArrowUp = guiModels.find('**/LarrowUp')
        guiCCWArrowDown = guiModels.find('**/LarrowDown')
        guiCCWArrowRollover = guiModels.find('**/LarrowRollover')
        self.rotateLeftButton = DirectButton(parent=self.buttonFrame, relief=None, image=(guiCCWArrowUp, guiCCWArrowDown, guiCCWArrowRollover, guiCCWArrowUp), image_pos=(0, 0, 0.1), image_scale=0.15, image3_color=Vec4(0.5, 0.5, 0.5, 0.75), text=('', TTLocalizer.HDRotateCCWLabel, TTLocalizer.HDRotateCCWLabel, ''), text_pos=(0.135, -0.1), text_scale=0.1, text_align=TextNode.ARight, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(-.125, 0, -.2), scale=0.7, command=self.rotateLeft)
        # Re-enable panel recentering when the cursor leaves the button.
        self.rotateLeftButton.bind(DGG.EXIT, self.enableButtonFrameTask)
        guiCWArrowUp = guiModels.find('**/RarrowUp')
        guiCWArrowDown = guiModels.find('**/RarrowDown')
        guiCWArrowRollover = guiModels.find('**/RarrowRollover')
        self.rotateRightButton = DirectButton(parent=self.buttonFrame, relief=None, image=(guiCWArrowUp, guiCWArrowDown, guiCWArrowRollover, guiCWArrowUp), image_pos=(0, 0, 0.1), image_scale=0.15, image3_color=Vec4(0.5, 0.5, 0.5, 0.75), text=('', TTLocalizer.HDRotateCWLabel, TTLocalizer.HDRotateCWLabel, ''), text_pos=(-0.135, -0.1), text_scale=0.1, text_align=TextNode.ALeft, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(0.125, 0, -0.2), scale=0.7, command=self.rotateRight)
        self.rotateRightButton.bind(DGG.EXIT, self.enableButtonFrameTask)
        self.buttonFrame.hide()
        return

    def recenterButtonFrameTask(self, state):
        # Low-priority task: keep the button cluster over the selected object.
        if self.selectedObject and self.fRecenter:
            self.buttonFrame.setPos(self.getSelectedObjectScreenXY())
        return Task.cont

    def disableButtonFrameTask(self, event = None):
        self.fRecenter = 0

    def enableButtonFrameTask(self, event = None):
        self.fRecenter = 1

    def getNearProjectionPoint(self, nodePath):
        """Project `nodePath`'s camera-space position onto the near plane."""
        origin = nodePath.getPos(camera)
        if origin[1] != 0.0:
            return origin * (base.camLens.getNear() / origin[1])
        else:
            # Degenerate case: point lies in the camera plane.
            return Point3(0, base.camLens.getNear(), 0)

    def getSelectedObjectScreenXY(self):
        """Return the selected object's center in aspect2d coordinates."""
        tNodePath = self.selectedObject.attachNewNode('temp')
        tNodePath.setPos(self.selectedObject.center)
        nearVec = self.getNearProjectionPoint(tNodePath)
        nearVec *= base.camLens.getFocalLength() / base.camLens.getNear()
        # Clamp to keep the panel on screen.
        render2dX = CLAMP(nearVec[0] / (base.camLens.getFilmSize()[0] / 2.0), -.9, 0.9)
        aspect2dX = render2dX * base.getAspectRatio()
        aspect2dZ = CLAMP(nearVec[2] / (base.camLens.getFilmSize()[1] / 2.0), -.8, 0.9)
        tNodePath.removeNode()
        return Vec3(aspect2dX, 0, aspect2dZ)

    def createMainControls(self, guiModels):
        """Build the attic-side furniture GUI (pickers, tabs, camera controls)."""
        attic = guiModels.find('**/attic')
        self.furnitureGui = DirectFrame(relief=None, pos=(-1.19, 1, 0.33), scale=0.04, image=attic)
        bMoveStopUp = guiModels.find('**/bu_atticX/bu_attic_up')
        bMoveStopDown = guiModels.find('**/bu_atticX/bu_attic_down')
        bMoveStopRollover = guiModels.find('**/bu_atticX/bu_attic_rollover')
        self.bStopMoveFurniture = DirectButton(parent=self.furnitureGui, relief=None, image=[bMoveStopUp, bMoveStopDown, bMoveStopRollover, bMoveStopUp], text=['', TTLocalizer.HDStopMoveFurnitureButton, TTLocalizer.HDStopMoveFurnitureButton], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_font=ToontownGlobals.getInterfaceFont(), pos=(-0.3, 0, 9.4), command=base.localAvatar.stopMoveFurniture)
        self.bindHelpText(self.bStopMoveFurniture, 'DoneMoving')
        self.atticRoof = DirectLabel(parent=self.furnitureGui, relief=None, image=guiModels.find('**/rooftile'))
        # NOTE(review): '**/item_backgroun' looks truncated but matches the
        # actual node name in the gui model — do not "fix" without checking.
        self.itemBackgroundFrame = DirectFrame(parent=self.furnitureGui, relief=None, image=guiModels.find('**/item_backgroun'), image_pos=(0, 0, -22), image_scale=(1, 1, 5))
        self.scrollUpFrame = DirectFrame(parent=self.furnitureGui, relief=None, image=guiModels.find('**/scrollup'), pos=(0, 0, -0.58))
        self.camButtonFrame = DirectFrame(parent=self.furnitureGui, relief=None, image=guiModels.find('**/low'), pos=(0, 0, -11.69))
        tagUp = guiModels.find('**/tag_up')
        tagDown = guiModels.find('**/tag_down')
        tagRollover = guiModels.find('**/tag_rollover')
        # Three side tabs switching between attic / in-room / trash pickers.
        self.inAtticButton = DirectButton(parent=self.itemBackgroundFrame, relief=None, text=TTLocalizer.HDInAtticLabel, text_pos=(-0.1, -0.25), image=[tagUp, tagDown, tagRollover], pos=(2.85, 0, 4), scale=0.8, command=self.showAtticPicker)
        self.bindHelpText(self.inAtticButton, 'Attic')
        self.inRoomButton = DirectButton(parent=self.itemBackgroundFrame, relief=None, text=TTLocalizer.HDInRoomLabel, text_pos=(-0.1, -0.25), image=[tagUp, tagDown, tagRollover], pos=(2.85, 0, 1.1), scale=0.8, command=self.showInRoomPicker)
        self.bindHelpText(self.inRoomButton, 'Room')
        self.inTrashButton = DirectButton(parent=self.itemBackgroundFrame, relief=None, text=TTLocalizer.HDInTrashLabel, text_pos=(-0.1, -0.25), image=[tagUp, tagDown, tagRollover], pos=(2.85, 0, -1.8), scale=0.8, command=self.showInTrashPicker)
        self.bindHelpText(self.inTrashButton, 'Trash')
        # Rotate each tab's text states to run vertically along the tag.
        for i in range(4):
            self.inAtticButton.component('text%d' % i).setR(-90)
            self.inRoomButton.component('text%d' % i).setR(-90)
            self.inTrashButton.component('text%d' % i).setR(-90)
        backInAtticUp = guiModels.find('**/bu_backinattic_up1')
        backInAtticDown = guiModels.find('**/bu_backinattic_down1')
        backInAtticRollover = guiModels.find('**/bu_backinattic_rollover2')
        self.sendToAtticButton = DirectButton(parent=self.furnitureGui, relief=None, pos=(0.4, 0, 12.8), text=['', TTLocalizer.HDToAtticLabel], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_pos=(1.2, -0.3), image=[backInAtticUp, backInAtticDown,
backInAtticRollover], command=self.sendItemToAttic) self.sendToAtticButton.hide() self.bindHelpText(self.sendToAtticButton, 'SendToAttic') zoomInUp = guiModels.find('**/bu_RzoomOut_up') zoomInDown = guiModels.find('**/bu_RzoomOut_down') zoomInRollover = guiModels.find('**/bu_RzoomOut_rollover') self.zoomInButton = DirectButton(parent=self.camButtonFrame, image=[zoomInUp, zoomInDown, zoomInRollover], relief=None, pos=(0.9, 0, -0.75), command=self.zoomCamIn) self.bindHelpText(self.zoomInButton, 'ZoomIn') zoomOutUp = guiModels.find('**/bu_LzoomIn_up') zoomOutDown = guiModels.find('**/bu_LzoomIn_down') zoomOutRollover = guiModels.find('**/buLzoomIn_rollover') self.zoomOutButton = DirectButton(parent=self.camButtonFrame, image=[zoomOutUp, zoomOutDown, zoomOutRollover], relief=None, pos=(-1.4, 0, -0.75), command=self.zoomCamOut) self.bindHelpText(self.zoomOutButton, 'ZoomOut') camCCWUp = guiModels.find('**/bu_Rarrow_up1') camCCWDown = guiModels.find('**/bu_Rarrow_down1') camCCWRollover = guiModels.find('**/bu_Rarrow_orllover') self.rotateCamLeftButton = DirectButton(parent=self.camButtonFrame, image=[camCCWUp, camCCWDown, camCCWRollover], relief=None, pos=(0.9, 0, -3.0), command=self.rotateCamCCW) self.bindHelpText(self.rotateCamLeftButton, 'RotateLeft') camCWUp = guiModels.find('**/bu_Larrow_up1') camCWDown = guiModels.find('**/bu_Larrow_down1') camCWRollover = guiModels.find('**/bu_Larrow_rollover2') self.rotateCamRightButton = DirectButton(parent=self.camButtonFrame, image=[camCWUp, camCWDown, camCWRollover], relief=None, pos=(-1.4, 0, -3.0), command=self.rotateCamCW) self.bindHelpText(self.rotateCamRightButton, 'RotateRight') trashcanGui = loader.loadModel('phase_3/models/gui/trashcan_gui') trashcanUp = trashcanGui.find('**/TrashCan_CLSD') trashcanDown = trashcanGui.find('**/TrashCan_OPEN') trashcanRollover = trashcanGui.find('**/TrashCan_RLVR') self.deleteEnterButton = DirectButton(parent=self.furnitureGui, image=(trashcanUp, trashcanDown, trashcanRollover, 
trashcanUp), text=['', TTLocalizer.InventoryDelete, TTLocalizer.InventoryDelete, ''], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_align=TextNode.ACenter, text_pos=(0, -0.12), text_font=ToontownGlobals.getInterfaceFont(), textMayChange=0, relief=None, pos=(3.7, 0.0, -13.8), scale=7.13, command=self.enterDeleteMode) self.bindHelpText(self.deleteEnterButton, 'DeleteEnter') self.deleteExitButton = DirectButton(parent=self.furnitureGui, image=(trashcanUp, trashcanDown, trashcanRollover, trashcanUp), text=('', TTLocalizer.InventoryDone, TTLocalizer.InventoryDone, ''), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_align=TextNode.ACenter, text_pos=(0, -0.12), text_font=ToontownGlobals.getInterfaceFont(), textMayChange=0, relief=None, pos=(3.7, 0.0, -13.8), scale=7.13, command=self.exitDeleteMode) self.bindHelpText(self.deleteExitButton, 'DeleteExit') self.deleteExitButton.hide() self.trashcanBase = DirectLabel(parent=self.furnitureGui, image=guiModels.find('**/trashcan_base'), relief=None, pos=(0, 0, -11.64)) self.furnitureGui.hide() self.helpText = DirectLabel(parent=self.furnitureGui, relief=DGG.SUNKEN, frameSize=(-0.5, 10, -3, 0.9), frameColor=(0.2, 0.2, 0.2, 0.5), borderWidth=(0.01, 0.01), text='', text_wordwrap=12, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.8, pos=(3, 0.0, -7), scale=1, text_align=TextNode.ALeft) self.helpText.hide() return def createAtticPicker(self): self.atticItemPanels = [] for itemIndex in range(len(self.furnitureManager.atticItems)): panel = FurnitureItemPanel(self.furnitureManager.atticItems[itemIndex], itemIndex, command=self.bringItemFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic') self.atticItemPanels.append(panel) self.atticWallpaperPanels = [] for itemIndex in range(len(self.furnitureManager.atticWallpaper)): panel = FurnitureItemPanel(self.furnitureManager.atticWallpaper[itemIndex], itemIndex, command=self.bringWallpaperFromAttic, 
deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic') self.atticWallpaperPanels.append(panel) self.atticWindowPanels = [] for itemIndex in range(len(self.furnitureManager.atticWindows)): panel = FurnitureItemPanel(self.furnitureManager.atticWindows[itemIndex], itemIndex, command=self.bringWindowFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic') self.atticWindowPanels.append(panel) self.regenerateAtticPicker() def regenerateAtticPicker(self): selectedIndex = 0 if self.atticPicker: selectedIndex = self.atticPicker.getSelectedIndex() for panel in self.atticItemPanels: panel.detachNode() for panel in self.atticWallpaperPanels: panel.detachNode() for panel in self.atticWindowPanels: panel.detachNode() self.atticPicker.destroy() self.atticPicker = None itemList = self.atticItemPanels + self.atticWallpaperPanels + self.atticWindowPanels if self.deleteMode: text = TTLocalizer.HDDeletePickerLabel else: text = TTLocalizer.HDAtticPickerLabel self.atticPicker = self.createScrolledList(itemList, text, 'atticPicker', selectedIndex) if self.inRoomPicker or self.inTrashPicker: self.atticPicker.hide() else: self.atticPicker.show() return def createInRoomPicker(self): self.inRoomPanels = [] for objectId, object in self.objectDict.items(): panel = FurnitureItemPanel(object.dfitem.item, objectId, command=self.requestReturnToAttic, deleteMode=self.deleteMode, withinFunc=self.pickInRoom, helpCategory='FurnitureItemPanelRoom') self.inRoomPanels.append(panel) self.regenerateInRoomPicker() def regenerateInRoomPicker(self): selectedIndex = 0 if self.inRoomPicker: selectedIndex = self.inRoomPicker.getSelectedIndex() for panel in self.inRoomPanels: panel.detachNode() self.inRoomPicker.destroy() self.inRoomPicker = None if self.deleteMode: text = TTLocalizer.HDDeletePickerLabel else: text = TTLocalizer.HDInRoomPickerLabel self.inRoomPicker = self.createScrolledList(self.inRoomPanels, text, 'inRoomPicker', selectedIndex) return def 
createInTrashPicker(self): self.inTrashPanels = [] for itemIndex in range(len(self.furnitureManager.deletedItems)): panel = FurnitureItemPanel(self.furnitureManager.deletedItems[itemIndex], itemIndex, command=self.requestReturnToAtticFromTrash, helpCategory='FurnitureItemPanelTrash') self.inTrashPanels.append(panel) self.regenerateInTrashPicker() def regenerateInTrashPicker(self): selectedIndex = 0 if self.inTrashPicker: selectedIndex = self.inTrashPicker.getSelectedIndex() for panel in self.inTrashPanels: panel.detachNode() self.inTrashPicker.destroy() self.inTrashPicker = None text = TTLocalizer.HDInTrashPickerLabel self.inTrashPicker = self.createScrolledList(self.inTrashPanels, text, 'inTrashPicker', selectedIndex) return def createScrolledList(self, itemList, text, name, selectedIndex): gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui') picker = DirectScrolledList(parent=self.furnitureGui, pos=(-0.38, 0.0, 3), scale=7.125, relief=None, items=itemList, numItemsVisible=5, text=text, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_pos=(0, 0.4), decButton_image=(gui.find('**/FndsLst_ScrollUp'), gui.find('**/FndsLst_ScrollDN'), gui.find('**/FndsLst_ScrollUp_Rllvr'), gui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_scale=(1.5, 1.5, 1.5), decButton_pos=(0, 0, 0.3), decButton_image3_color=Vec4(1, 1, 1, 0.1), incButton_image=(gui.find('**/FndsLst_ScrollUp'), gui.find('**/FndsLst_ScrollDN'), gui.find('**/FndsLst_ScrollUp_Rllvr'), gui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_scale=(1.5, 1.5, -1.5), incButton_pos=(0, 0, -1.878), incButton_image3_color=Vec4(1, 1, 1, 0.1)) picker.setName(name) picker.scrollTo(selectedIndex) return picker def reset(): self.destroy() furnitureMenu.destroy() def showAtticPicker(self): if self.inRoomPicker: self.inRoomPicker.destroy() self.inRoomPicker = None if self.inTrashPicker: self.inTrashPicker.destroy() self.inTrashPicker = None self.atticPicker.show() 
self.inAtticButton['image_color'] = Vec4(1, 1, 1, 1) self.inRoomButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1) self.inTrashButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1) self.deleteExitButton['state'] = 'normal' self.deleteEnterButton['state'] = 'normal' return def showInRoomPicker(self): messenger.send('wakeup') if not self.inRoomPicker: self.createInRoomPicker() self.atticPicker.hide() if self.inTrashPicker: self.inTrashPicker.destroy() self.inTrashPicker = None self.inAtticButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1) self.inRoomButton['image_color'] = Vec4(1, 1, 1, 1) self.inTrashButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1) self.deleteExitButton['state'] = 'normal' self.deleteEnterButton['state'] = 'normal' return def showInTrashPicker(self): messenger.send('wakeup') if not self.inTrashPicker: self.createInTrashPicker() self.atticPicker.hide() if self.inRoomPicker: self.inRoomPicker.destroy() self.inRoomPicker = None self.inAtticButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1) self.inRoomButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1) self.inTrashButton['image_color'] = Vec4(1, 1, 1, 1) self.deleteExitButton['state'] = 'disabled' self.deleteEnterButton['state'] = 'disabled' return def sendItemToAttic(self): if base.config.GetBool('want-qa-regression', 0): self.notify.info('QA-REGRESSION: ESTATE: Send Item to Attic') messenger.send('wakeup') if self.selectedObject: callback = PythonUtil.Functor(self.__sendItemToAtticCallback, self.selectedObject.id()) self.furnitureManager.moveItemToAttic(self.selectedObject.dfitem, callback) self.deselectObject() def __sendItemToAtticCallback(self, objectId, retcode, item): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to send item %s to attic, reason %s.' 
% (item.getName(), retcode)) return del self.objectDict[objectId] if self.selectedObject != None and self.selectedObject.id() == objectId: self.selectedObject.detachNode() self.deselectObject() itemIndex = len(self.atticItemPanels) panel = FurnitureItemPanel(item, itemIndex, command=self.bringItemFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic') self.atticItemPanels.append(panel) self.regenerateAtticPicker() if self.inRoomPicker: for i in range(len(self.inRoomPanels)): if self.inRoomPanels[i].itemId == objectId: del self.inRoomPanels[i] self.regenerateInRoomPicker() return return def cleanupDialog(self, buttonValue = None): if self.dialog: self.dialog.cleanup() self.dialog = None self.__enableItemButtons(1) return def enterDeleteMode(self): self.deleteMode = 1 self.__updateDeleteMode() def exitDeleteMode(self): self.deleteMode = 0 self.__updateDeleteMode() def __updateDeleteMode(self): if not self.atticPicker: return self.notify.debug('__updateDeleteMode deleteMode=%s' % self.deleteMode) if self.deleteMode: framePanelColor = DeletePickerPanelColor atticText = TTLocalizer.HDDeletePickerLabel inRoomText = TTLocalizer.HDDeletePickerLabel helpCategory = 'FurnitureItemPanelDelete' else: framePanelColor = NormalPickerPanelColor atticText = TTLocalizer.HDAtticPickerLabel inRoomText = TTLocalizer.HDInRoomPickerLabel helpCategory = None if self.inRoomPicker: self.inRoomPicker['text'] = inRoomText for panel in self.inRoomPicker['items']: panel.setDeleteMode(self.deleteMode) panel.bindHelpText(helpCategory) if self.atticPicker: self.atticPicker['text'] = atticText for panel in self.atticPicker['items']: panel.setDeleteMode(self.deleteMode) panel.bindHelpText(helpCategory) self.__updateDeleteButtons() return def __updateDeleteButtons(self): if self.deleteMode: self.deleteExitButton.show() self.deleteEnterButton.hide() else: self.deleteEnterButton.show() self.deleteExitButton.hide() def deleteItemFromRoom(self, dfitem, objectId, itemIndex): 
messenger.send('wakeup') callback = PythonUtil.Functor(self.__deleteItemFromRoomCallback, objectId, itemIndex) self.furnitureManager.deleteItemFromRoom(dfitem, callback) def __deleteItemFromRoomCallback(self, objectId, itemIndex, retcode, item): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to delete item %s from room, reason %s.' % (item.getName(), retcode)) return del self.objectDict[objectId] if self.selectedObject != None and self.selectedObject.id() == objectId: self.selectedObject.detachNode() self.deselectObject() if self.inRoomPicker and itemIndex is not None: del self.inRoomPanels[itemIndex] self.regenerateInRoomPicker() return def bringItemFromAttic(self, item, itemIndex): if base.config.GetBool('want-qa-regression', 0): self.notify.info('QA-REGRESSION: ESTATE: Place Item in Room') messenger.send('wakeup') self.__enableItemButtons(0) if self.deleteMode: self.requestDelete(item, itemIndex, self.deleteItemFromAttic) return pos = self.targetNodePath.getRelativePoint(base.localAvatar, Point3(0, 2, 0)) hpr = Point3(0, 0, 0) if abs(pos[0]) > 3000 or abs(pos[1]) > 3000 or abs(pos[2]) > 300: self.notify.warning('bringItemFromAttic extreme pos targetNodePath=%s avatar=%s %s' % (repr(self.targetNodePath.getPos(render)), repr(base.localAvatar.getPos(render)), repr(pos))) if item.getFlags() & CatalogFurnitureItem.FLPainting: for object in self.objectDict.values(): object.stashBuiltInCollisionNodes() self.gridSnapNP.iPosHpr() target = self.targetNodePath self.iRay.setParentNP(base.localAvatar) entry = self.iRay.pickBitMask3D(bitMask=ToontownGlobals.WallBitmask, targetNodePath=target, origin=Point3(0, 0, 6), dir=Vec3(0, 1, 0), skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE) for object in self.objectDict.values(): object.unstashBuiltInCollisionNodes() if entry: self.alignObject(entry, target, fClosest=0, wallOffset=0.1) pos = self.gridSnapNP.getPos(target) hpr = self.gridSnapNP.getHpr(target) else: self.notify.warning('wall not found for 
painting') self.furnitureManager.moveItemFromAttic(itemIndex, (pos[0], pos[1], pos[2], hpr[0], hpr[1], hpr[2]), self.__bringItemFromAtticCallback) def __bringItemFromAtticCallback(self, retcode, dfitem, itemIndex): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to bring furniture item %s into room, reason %s.' % (itemIndex, retcode)) return mo = self.loadObject(dfitem) objectId = mo.id() self.atticItemPanels[itemIndex].destroy() del self.atticItemPanels[itemIndex] for i in range(itemIndex, len(self.atticItemPanels)): self.atticItemPanels[i].itemId -= 1 self.regenerateAtticPicker() if self.inRoomPicker: panel = FurnitureItemPanel(dfitem.item, objectId, command=self.requestReturnToAttic, helpCategory='FurnitureItemPanelRoom') self.inRoomPanels.append(panel) self.regenerateInRoomPicker() def deleteItemFromAttic(self, item, itemIndex): messenger.send('wakeup') self.furnitureManager.deleteItemFromAttic(item, itemIndex, self.__deleteItemFromAtticCallback) def __deleteItemFromAtticCallback(self, retcode, item, itemIndex): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to delete furniture item %s, reason %s.' % (itemIndex, retcode)) return self.atticItemPanels[itemIndex].destroy() del self.atticItemPanels[itemIndex] for i in range(itemIndex, len(self.atticItemPanels)): self.atticItemPanels[i].itemId -= 1 self.regenerateAtticPicker() def bringWallpaperFromAttic(self, item, itemIndex): messenger.send('wakeup') self.__enableItemButtons(0) if self.deleteMode: self.requestDelete(item, itemIndex, self.deleteWallpaperFromAttic) return if base.localAvatar.getY() < 2.3: room = 0 else: room = 1 self.furnitureManager.moveWallpaperFromAttic(itemIndex, room, self.__bringWallpaperFromAtticCallback) def __bringWallpaperFromAtticCallback(self, retcode, itemIndex, room): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to bring wallpaper %s into room %s, reason %s.' 
% (itemIndex, room, retcode)) return self.atticWallpaperPanels[itemIndex].destroy() item = self.furnitureManager.atticWallpaper[itemIndex] panel = FurnitureItemPanel(item, itemIndex, command=self.bringWallpaperFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic') self.atticWallpaperPanels[itemIndex] = panel self.regenerateAtticPicker() def deleteWallpaperFromAttic(self, item, itemIndex): messenger.send('wakeup') self.furnitureManager.deleteWallpaperFromAttic(item, itemIndex, self.__deleteWallpaperFromAtticCallback) def __deleteWallpaperFromAtticCallback(self, retcode, item, itemIndex): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to delete wallpaper %s, reason %s.' % (itemIndex, retcode)) return self.atticWallpaperPanels[itemIndex].destroy() del self.atticWallpaperPanels[itemIndex] for i in range(itemIndex, len(self.atticWallpaperPanels)): self.atticWallpaperPanels[i].itemId -= 1 self.regenerateAtticPicker() def bringWindowFromAttic(self, item, itemIndex): messenger.send('wakeup') self.__enableItemButtons(0) if self.deleteMode: self.requestDelete(item, itemIndex, self.deleteWindowFromAttic) return if base.localAvatar.getY() < 2.3: slot = 2 else: slot = 4 self.furnitureManager.moveWindowFromAttic(itemIndex, slot, self.__bringWindowFromAtticCallback) def __bringWindowFromAtticCallback(self, retcode, itemIndex, slot): self.__enableItemButtons(1) if retcode < 0: self.notify.info('Unable to bring window %s into slot %s, reason %s.' 
% (itemIndex, slot, retcode))
            return
        # FM_SwappedItem: the slot already held a window, which came back to
        # the attic, so the panel at itemIndex is replaced in place rather
        # than removed.
        if retcode == ToontownGlobals.FM_SwappedItem:
            self.atticWindowPanels[itemIndex].destroy()
            item = self.furnitureManager.atticWindows[itemIndex]
            panel = FurnitureItemPanel(item, itemIndex, command=self.bringWindowFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
            self.atticWindowPanels[itemIndex] = panel
        else:
            # Window left the attic: drop its panel and renumber the rest.
            self.atticWindowPanels[itemIndex].destroy()
            del self.atticWindowPanels[itemIndex]
            for i in range(itemIndex, len(self.atticWindowPanels)):
                self.atticWindowPanels[i].itemId -= 1
        self.regenerateAtticPicker()

    def deleteWindowFromAttic(self, item, itemIndex):
        # Ask the manager to delete the window; reply arrives asynchronously.
        messenger.send('wakeup')
        self.furnitureManager.deleteWindowFromAttic(item, itemIndex, self.__deleteWindowFromAtticCallback)

    def __deleteWindowFromAtticCallback(self, retcode, item, itemIndex):
        # Manager reply: on success remove the panel and renumber the rest.
        self.__enableItemButtons(1)
        if retcode < 0:
            self.notify.info('Unable to delete window %s, reason %s.' % (itemIndex, retcode))
            return
        self.atticWindowPanels[itemIndex].destroy()
        del self.atticWindowPanels[itemIndex]
        for i in range(itemIndex, len(self.atticWindowPanels)):
            self.atticWindowPanels[i].itemId -= 1
        self.regenerateAtticPicker()

    def setGridSpacingString(self, spacingStr):
        # SECURITY NOTE(review): eval() of the spacing string executes
        # arbitrary Python; presumably spacingStr only ever comes from a
        # fixed internal menu, but if it can carry user input this should
        # become float()/a lookup table -- confirm the caller.
        spacing = eval(spacingStr)
        self.setGridSpacing(spacing)

    def setGridSpacing(self, gridSpacing):
        # Grid step used when snapping furniture positions.
        self.gridSpacing = gridSpacing

    def makeHouseExtentsBox(self):
        # Build four invisible collision walls around the tight bounds of the
        # house geometry so furniture cannot be dragged outside the house.
        houseGeom = self.targetNodePath.findAllMatches('**/group*')
        targetBounds = houseGeom.getTightBounds()
        self.houseExtents = self.targetNodePath.attachNewNode('furnitureCollisionNode')
        # min/max corners of the bounding box (m = min, M = max).
        mx = targetBounds[0][0]
        Mx = targetBounds[1][0]
        my = targetBounds[0][1]
        My = targetBounds[1][1]
        mz = targetBounds[0][2]
        Mz = targetBounds[1][2]
        cn = CollisionNode('extentsCollisionNode')
        cn.setIntoCollideMask(ToontownGlobals.GhostBitmask)
        self.houseExtents.attachNewNode(cn)
        # One quad per side wall; vertex order keeps the solid face inward.
        cp = CollisionPolygon(Point3(mx, my, mz), Point3(mx, My, mz), Point3(mx, My, Mz), Point3(mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(Mx, My, mz), Point3(Mx, my, mz),
Point3(Mx, my, Mz), Point3(Mx, My, Mz)) cn.addSolid(cp) cp = CollisionPolygon(Point3(Mx, my, mz), Point3(mx, my, mz), Point3(mx, my, Mz), Point3(Mx, my, Mz)) cn.addSolid(cp) cp = CollisionPolygon(Point3(mx, My, mz), Point3(Mx, My, mz), Point3(Mx, My, Mz), Point3(mx, My, Mz)) cn.addSolid(cp) def makeDoorBlocker(self): self.doorBlocker = self.targetNodePath.attachNewNode('doorBlocker') cn = CollisionNode('doorBlockerCollisionNode') cn.setIntoCollideMask(ToontownGlobals.FurnitureSideBitmask) self.doorBlocker.attachNewNode(cn) cs = CollisionSphere(Point3(-12, -33, 0), 7.5) cn.addSolid(cs) def createVerifyDialog(self, item, verifyText, okFunc, cancelFunc): if self.verifyFrame == None: buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui') okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr')) cancelButtonImage = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')) self.verifyFrame = DirectFrame(pos=(-0.4, 0.1, 0.3), scale=0.75, relief=None, image=DGG.getDefaultDialogGeom(), image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.2, 1, 1.3), text='', text_wordwrap=19, text_scale=0.06, text_pos=(0, 0.5), textMayChange=1, sortOrder=NO_FADE_SORT_INDEX) self.okButton = DirectButton(parent=self.verifyFrame, image=okButtonImage, relief=None, text=OTPLocalizer.DialogOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(-0.22, 0.0, -0.5)) self.cancelButton = DirectButton(parent=self.verifyFrame, image=cancelButtonImage, relief=None, text=OTPLocalizer.DialogCancel, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.22, 0.0, -0.5)) self.deleteItemText = DirectLabel(parent=self.verifyFrame, relief=None, text='', text_wordwrap=16, pos=(0.0, 0.0, -0.4), scale=0.09) self.verifyFrame['text'] = verifyText self.deleteItemText['text'] = item.getName() self.okButton['command'] = okFunc self.cancelButton['command'] = cancelFunc 
self.verifyFrame.show() self.itemPanel, self.itemIval = item.getPicture(base.localAvatar) if self.itemPanel: self.itemPanel.reparentTo(self.verifyFrame, -1) self.itemPanel.setPos(0, 0, 0.05) self.itemPanel.setScale(0.35) self.deleteItemText.setPos(0.0, 0.0, -0.4) else: self.deleteItemText.setPos(0, 0, 0.07) if self.itemIval: self.itemIval.loop() return def __handleVerifyDeleteOK(self): if base.config.GetBool('want-qa-regression', 0): self.notify.info('QA-REGRESSION: ESTATE: Send Item to Trash') deleteFunction = self.verifyItems[0] deleteFunctionArgs = self.verifyItems[1:] self.__cleanupVerifyDelete() deleteFunction(*deleteFunctionArgs) def __cleanupVerifyDelete(self, *args): if self.nonDeletableItem: self.nonDeletableItem.cleanup() self.nonDeletableItem = None if self.verifyFrame: self.verifyFrame.hide() if self.itemIval: self.itemIval.finish() self.itemIval = None if self.itemPanel: self.itemPanel.destroy() self.itemPanel = None self.verifyItems = None return def __enableItemButtons(self, enabled): self.notify.debug('__enableItemButtons %d' % enabled) if enabled: buttonState = DGG.NORMAL else: buttonState = DGG.DISABLED if hasattr(self, 'inAtticButton'): self.inAtticButton['state'] = buttonState if hasattr(self, 'inRoomButton'): self.inRoomButton['state'] = buttonState if hasattr(self, 'inTrashButton'): self.inTrashButton['state'] = buttonState pickers = [self.atticPicker, self.inRoomPicker, self.inTrashPicker] for picker in pickers: if picker: for panel in picker['items']: if not panel.isEmpty(): panel.enable(enabled) def __resetAndCleanup(self, *args): self.__enableItemButtons(1) self.__cleanupVerifyDelete() def requestDelete(self, item, itemIndex, deleteFunction): self.__cleanupVerifyDelete() if self.furnitureManager.ownerId != base.localAvatar.doId or not item.isDeletable(): self.warnNonDeletableItem(item) return self.createVerifyDialog(item, TTLocalizer.HDDeleteItem, self.__handleVerifyDeleteOK, self.__resetAndCleanup) self.verifyItems = (deleteFunction, 
item, itemIndex) def requestRoomDelete(self, dfitem, objectId, itemIndex): self.__cleanupVerifyDelete() item = dfitem.item if self.furnitureManager.ownerId != base.localAvatar.doId or not item.isDeletable(): self.warnNonDeletableItem(item) return self.createVerifyDialog(item, TTLocalizer.HDDeleteItem, self.__handleVerifyDeleteOK, self.__resetAndCleanup) self.verifyItems = (self.deleteItemFromRoom, dfitem, objectId, itemIndex) def warnNonDeletableItem(self, item): message = TTLocalizer.HDNonDeletableItem if not item.isDeletable(): if item.getFlags() & CatalogFurnitureItem.FLBank: message = TTLocalizer.HDNonDeletableBank elif item.getFlags() & CatalogFurnitureItem.FLCloset: message = TTLocalizer.HDNonDeletableCloset elif item.getFlags() & CatalogFurnitureItem.FLPhone: message = TTLocalizer.HDNonDeletablePhone elif item.getFlags() & CatalogFurnitureItem.FLTrunk: message = TTLocalizer.HDNonDeletableTrunk if self.furnitureManager.ownerId != base.localAvatar.doId: message = TTLocalizer.HDNonDeletableNotOwner % self.furnitureManager.ownerName self.nonDeletableItem = TTDialog.TTDialog(text=message, style=TTDialog.Acknowledge, fadeScreen=0, command=self.__resetAndCleanup) self.nonDeletableItem.show() def requestReturnToAttic(self, item, objectId): self.__cleanupVerifyDelete() itemIndex = None for i in range(len(self.inRoomPanels)): if self.inRoomPanels[i].itemId == objectId: itemIndex = i self.__enableItemButtons(0) break if self.deleteMode: dfitem = self.objectDict[objectId].dfitem self.requestRoomDelete(dfitem, objectId, itemIndex) return self.createVerifyDialog(item, TTLocalizer.HDReturnVerify, self.__handleVerifyReturnOK, self.__resetAndCleanup) self.verifyItems = (item, objectId) return def __handleVerifyReturnOK(self): item, objectId = self.verifyItems self.__cleanupVerifyDelete() self.pickInRoom(objectId) self.sendItemToAttic() def requestReturnToAtticFromTrash(self, item, itemIndex): self.__cleanupVerifyDelete() self.__enableItemButtons(0) 
self.createVerifyDialog(item, TTLocalizer.HDReturnFromTrashVerify, self.__handleVerifyReturnFromTrashOK, self.__resetAndCleanup) self.verifyItems = (item, itemIndex) def __handleVerifyReturnFromTrashOK(self): if base.config.GetBool('want-qa-regression', 0): self.notify.info('QA-REGRESSION: ESTATE: Send Item to Attic') item, itemIndex = self.verifyItems self.__cleanupVerifyDelete() self.recoverDeletedItem(item, itemIndex) def recoverDeletedItem(self, item, itemIndex): messenger.send('wakeup') self.furnitureManager.recoverDeletedItem(item, itemIndex, self.__recoverDeletedItemCallback) def __recoverDeletedItemCallback(self, retcode, item, itemIndex): self.__cleanupVerifyDelete() if retcode < 0: if retcode == ToontownGlobals.FM_HouseFull: self.showHouseFullDialog() self.notify.info('Unable to recover deleted item %s, reason %s.' % (itemIndex, retcode)) return self.__enableItemButtons(1) self.inTrashPanels[itemIndex].destroy() del self.inTrashPanels[itemIndex] for i in range(itemIndex, len(self.inTrashPanels)): self.inTrashPanels[i].itemId -= 1 self.regenerateInTrashPicker() itemType = item.getTypeCode() if itemType == CatalogItemTypes.WALLPAPER_ITEM or itemType == CatalogItemTypes.FLOORING_ITEM or itemType == CatalogItemTypes.MOULDING_ITEM or itemType == CatalogItemTypes.WAINSCOTING_ITEM: itemIndex = len(self.atticWallpaperPanels) bringCommand = self.bringWallpaperFromAttic elif itemType == CatalogItemTypes.WINDOW_ITEM: itemIndex = len(self.atticWindowPanels) bringCommand = self.bringWindowFromAttic else: itemIndex = len(self.atticItemPanels) bringCommand = self.bringItemFromAttic panel = FurnitureItemPanel(item, itemIndex, command=bringCommand, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic') if itemType == CatalogItemTypes.WALLPAPER_ITEM or itemType == CatalogItemTypes.FLOORING_ITEM or itemType == CatalogItemTypes.MOULDING_ITEM or itemType == CatalogItemTypes.WAINSCOTING_ITEM: self.atticWallpaperPanels.append(panel) elif itemType == 
CatalogItemTypes.WINDOW_ITEM: self.atticWindowPanels.append(panel) else: self.atticItemPanels.append(panel) self.regenerateAtticPicker() def showHouseFullDialog(self): self.cleanupDialog() self.dialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=TTLocalizer.HDHouseFull, text_wordwrap=15, command=self.cleanupDialog) self.dialog.show() def bindHelpText(self, button, category): button.bind(DGG.ENTER, self.showHelpText, extraArgs=[category, None]) button.bind(DGG.EXIT, self.hideHelpText) return def showHelpText(self, category, itemName, xy): def showIt(task): helpText = TTLocalizer.HDHelpDict.get(category) if helpText: if itemName: helpText = helpText % itemName self.helpText['text'] = helpText self.helpText.show() else: print 'category: %s not found' taskMgr.doMethodLater(0.75, showIt, 'showHelpTextDoLater') def hideHelpText(self, xy): taskMgr.remove('showHelpTextDoLater') self.helpText['text'] = '' self.helpText.hide()
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Normalization preprocessing layer.""" # pylint: disable=g-classes-have-attributes from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import base_preprocessing_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.util import compat from tensorflow.python.util.tf_export import keras_export _COUNT_NAME = 'count' _MEAN_NAME = 'mean' _VARIANCE_NAME = 'variance' def convert_to_ndarray(values): if isinstance(values, np.ndarray): return values elif isinstance(values, ops.Tensor): return K.get_value(values) else: return np.array(values) @keras_export('keras.layers.experimental.preprocessing.Normalization', v1=[]) class Normalization(base_preprocessing_layer.CombinerPreprocessingLayer): """Feature-wise normalization of the data. This layer will coerce its inputs into a distribution centered around 0 with standard deviation 1. 
It accomplishes this by precomputing the mean and variance of the data, and calling (input-mean)/sqrt(var) at runtime. What happens in `adapt`: Compute mean and variance of the data and store them as the layer's weights. `adapt` should be called before `fit`, `evaluate`, or `predict`. Arguments: axis: Integer or tuple of integers, the axis or axes that should be "kept". These axes are not be summed over when calculating the normalization statistics. By default the last axis, the `features` axis is kept and any `space` or `time` axes are summed. Each element in the the axes that are kept is normalized independently. If `axis` is set to 'None', the layer will perform scalar normalization (dividing the input by a single scalar value). The `batch` axis, 0, is always summed over (`axis=0` is not allowed). mean: The mean value(s) to use during normalization. The passed value(s) will be broadcast to the shape of the kept axes above; if the value(s) cannot be broadcast, an error will be raised when this layer's build() method is called. variance: The variance value(s) to use during normalization. The passed value(s) will be broadcast to the shape of the kept axes above; if the value(s)cannot be broadcast, an error will be raised when this layer's build() method is called. Examples: Calculate the mean and variance by analyzing the dataset in `adapt`. >>> adapt_data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32) >>> input_data = np.array([[1.], [2.], [3.]], np.float32) >>> layer = Normalization() >>> layer.adapt(adapt_data) >>> layer(input_data) <tf.Tensor: shape=(3, 1), dtype=float32, numpy= array([[-1.4142135 ], [-0.70710677], [ 0. ]], dtype=float32)> Pass the mean and variance directly. >>> input_data = np.array([[1.], [2.], [3.]], np.float32) >>> layer = Normalization(mean=3., variance=2.) >>> layer(input_data) <tf.Tensor: shape=(3, 1), dtype=float32, numpy= array([[-1.4142135 ], [-0.70710677], [ 0. 
]], dtype=float32)> """ def __init__(self, axis=-1, mean=None, variance=None, **kwargs): # Standardize `axis` to a tuple. if axis is None: axis = () elif isinstance(axis, int): axis = (axis,) else: axis = tuple(axis) super(Normalization, self).__init__( combiner=_NormalizingCombiner(axis), **kwargs) base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True) if 0 in axis: raise ValueError('The argument \'axis\' may not be 0.') self.axis = axis if isinstance(mean, variables.Variable): raise ValueError('Normalization does not support passing a Variable ' 'for the `mean` init arg.') if isinstance(variance, variables.Variable): raise ValueError('Normalization does not support passing a Variable ' 'for the `variance` init arg.') if mean is not None and variance is not None: mean = convert_to_ndarray(mean) variance = convert_to_ndarray(variance) elif mean is not None or variance is not None: raise ValueError( 'When setting values directly, both `mean` and `variance` ' 'must be set. Got mean: {} and variance: {}'.format(mean, variance)) self.mean_val = mean self.variance_val = variance def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if len(input_shape) == 1: input_shape = input_shape + [1] ndim = len(input_shape) # Sort `self.axis` to avoid transposing `mean_and_var_shape`. # Negative axes are not sortable until you know the number of dimensions. 
original_axis = self.axis self.axis = tuple(sorted(self.axis, key=lambda a: a if a >= 0 else ndim + a)) if any(a < 1-ndim for a in self.axis) or any(a >= ndim for a in self.axis): raise ValueError('All `axis` values must be in ' 'the range [1-ndim, ndim-1].\n' 'Got:\n' ' ndim: {}\n' ' axis: {}'.format(ndim, original_axis)) self._broadcast_shape = [1 for _ in range(len(input_shape))] mean_and_var_shape = [] for i in self.axis: mean_and_var_shape.append(input_shape[i]) self._broadcast_shape[i] = input_shape[i] # count is not used in this class's call() method, but is used to re-create # the accumulator during multiple calls to 'adapt'. # TODO(omalleyt): should mean and variance be set to self.dtype? self.mean = self._add_state_variable( name=_MEAN_NAME, shape=mean_and_var_shape, dtype=K.floatx(), initializer=init_ops.zeros_initializer) self.variance = self._add_state_variable( name=_VARIANCE_NAME, shape=mean_and_var_shape, dtype=K.floatx(), initializer=init_ops.ones_initializer) self.count = self._add_state_variable( name=_COUNT_NAME, shape=(), dtype=dtypes.int64, initializer=init_ops.zeros_initializer) super(Normalization, self).build(input_shape) if (self.mean_val is not None and self.variance_val is not None): mean_val = self.mean_val * np.ones(mean_and_var_shape) variance_val = self.variance_val * np.ones(mean_and_var_shape) self.set_weights([mean_val, variance_val]) def call(self, inputs): inputs = ops.convert_to_tensor_v2_with_dispatch(inputs) if inputs.shape.rank == 1: inputs = array_ops.expand_dims(inputs, 1) # If the inputs are not floats, cast them to floats. This avoids issues # with int-float multiplication and division below. if inputs.dtype != K.floatx(): inputs = math_ops.cast(inputs, K.floatx()) # We need to reshape the mean and variance data to ensure that Tensorflow # broadcasts the data correctly. 
mean = array_ops.reshape(self.mean, self._broadcast_shape) variance = array_ops.reshape(self.variance, self._broadcast_shape) return ((inputs - mean) / math_ops.maximum(math_ops.sqrt(variance), K.epsilon())) def compute_output_shape(self, input_shape): return input_shape def compute_output_signature(self, input_spec): return input_spec def get_config(self): config = {'axis': self.axis} base_config = super(Normalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) def set_weights(self, weights): """Override for set_weights to ensure we can set just mean/var weights.""" if len(weights) == 2: weights.append(np.array(0)) super(Normalization, self).set_weights(weights) class _NormalizingCombiner(base_preprocessing_layer.Combiner): """Combiner for the Normalization preprocessing layer. This class encapsulates the computations for finding the mean and variance of a set of data in a stable and numerically correct way. Its associated accumulator is a namedtuple('count', 'mean', 'variance'). Attributes: axis: The axis to compute mean and var over. """ COUNT_IDX = 0 MEAN_IDX = 1 VAR_IDX = 2 def __init__(self, axis): self.axis = axis def compute(self, values, accumulator=None): """Compute a step in this computation, returning a new accumulator.""" values = np.array(values) if values.ndim == 1: values = np.expand_dims(values, 1) # `np.delete` ignores negative indexes, so use a mask to delete items. axis_mask = np.ones([values.ndim], dtype=bool) axis_mask[np.array(self.axis, dtype=np.int32)] = False # This is the shape of all reduced axes (not specified in 'axis'). reduction_counts = np.array(values.shape)[axis_mask] # We get the number of elements that will be reduced by multiplying all # values of 'shape' corresponding to the reduced axes. 
count = np.prod(reduction_counts, dtype=np.int64) # We want to reduce across dimensions except those specified in 'axis' # when using np.mean or np.variance; create the tuple of axes to reduce # over here. reduction_axes = tuple(np.arange(values.ndim)[axis_mask]) mean = np.mean(values, axis=reduction_axes, dtype=np.float64) variance = np.var(values, axis=reduction_axes, dtype=np.float64) # Create an accumulator with our new data and either return it or combine # it with the passed accumulator. if accumulator is None: return self._create_accumulator(count, mean, variance) else: return self.add_data_to_accumulator(count, mean, variance, accumulator) def add_data_to_accumulator(self, count, mean, variance, accumulator): """Add new data to the totals in an accumulator.""" # Combine accumulators and return the result. combined_count = count + accumulator[self.COUNT_IDX] # To combine accumulator means, we weight each accumulator's mean by the # number of elements that were accumulated, and then divide by the # total number of elements. combined_mean = (mean * count + accumulator[self.MEAN_IDX] * accumulator[self.COUNT_IDX]) / combined_count # The variance is computed using the lack-of-fit sum of squares # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares). accumulator_var_contribution = accumulator[self.COUNT_IDX] * ( accumulator[self.VAR_IDX] + np.square(accumulator[self.MEAN_IDX] - combined_mean)) data_var_contribution = count * (variance + np.square(mean - combined_mean)) combined_variance = (accumulator_var_contribution + data_var_contribution) / combined_count accumulator[self.COUNT_IDX] = combined_count accumulator[self.MEAN_IDX] = np.nan_to_num(combined_mean) accumulator[self.VAR_IDX] = np.nan_to_num(combined_variance) return accumulator def merge(self, accumulators): """Merge several accumulators to a single accumulator.""" # Combine accumulators and return the result. 
combined_count = np.sum( [accumulator[self.COUNT_IDX] for accumulator in accumulators]) # To combine accumulator means, we weight each accumulator's mean by the # number of elements that were accumulated, and then divide by the # total number of elements. combined_mean = np.add.reduce([ accumulator[self.MEAN_IDX] * accumulator[self.COUNT_IDX] for accumulator in accumulators ]) / combined_count # The variance is computed using the lack-of-fit sum of squares # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares). def variance_contribution(accumulator): return accumulator[self.COUNT_IDX] * ( accumulator[self.VAR_IDX] + np.square(accumulator[self.MEAN_IDX] - combined_mean)) combined_variance = np.add.reduce([ variance_contribution(accumulator) for accumulator in accumulators ]) / combined_count return self._create_accumulator(combined_count, combined_mean, combined_variance) def extract(self, accumulator): """Convert an accumulator into a dict of output values.""" return { _COUNT_NAME: accumulator[self.COUNT_IDX], _MEAN_NAME: accumulator[1], _VARIANCE_NAME: accumulator[2] } def restore(self, output): """Create an accumulator based on 'output'.""" # There is no special internal state here, so we just return the relevant # internal value. count = output[_COUNT_NAME] mean = output[_MEAN_NAME] var = output[_VARIANCE_NAME] if (count == 0 and (mean.any() != 0.0 or var.any() != 0.0)): raise RuntimeError( 'The mean and/or variance of a Normalization preprocessing layer ' "were set without also setting 'count'. 
If 'count' is not also set, " " or was set to 0, 'adapt' cannot be called unless the 'reset_state'" 'arg is True.') return self._create_accumulator(output[_COUNT_NAME], output[_MEAN_NAME], output[_VARIANCE_NAME]) def serialize(self, accumulator): """Serialize an accumulator for a remote call.""" output_dict = { _COUNT_NAME: accumulator[self.COUNT_IDX].tolist(), _MEAN_NAME: accumulator[1].tolist(), _VARIANCE_NAME: accumulator[2].tolist() } return compat.as_bytes(json.dumps(output_dict)) def deserialize(self, encoded_accumulator): """Deserialize an accumulator received from 'serialize()'.""" value_dict = json.loads(compat.as_text(encoded_accumulator)) return self._create_accumulator( np.array(value_dict[_COUNT_NAME]), np.array(value_dict[_MEAN_NAME]), np.array(value_dict[_VARIANCE_NAME])) def _create_accumulator(self, count, mean, variance): """Convert any 'nan' values in the given accumulator to numeric values.""" return [count, mean, variance]
# -*- coding: utf-8 -*-
import HTMLParser
import json
import math
import re
import time

from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from Data.api_keys import load_key
from Data import ignores
from Utils import WebUtils, StringUtils

from bs4 import BeautifulSoup
from isodate import parse_duration
from twisted.words.protocols.irc import assembleFormattedText, attributes as A


class URLFollow(CommandInterface):
    # Bot command: watches messages for URLs and replies with a summary of the
    # linked page (title, or site-specific details for known services).
    triggers = ['urlfollow', 'follow']
    acceptedTypes = ['PRIVMSG', 'ACTION']
    help = 'automatic function that follows urls and grabs information about the resultant webpage'
    runInThread = True

    htmlParser = HTMLParser.HTMLParser()

    # Formatted " | " separator used between response fields.
    graySplitter = assembleFormattedText(A.normal[' ', A.fg.gray['|'], ' '])

    def onLoad(self):
        # Initialize per-load state: external-handler patterns, API keys, and
        # the auto-follow toggle.
        self.handledExternally = {}
        """@type : dict[str, list[str]]"""
        # dict of regex patterns not to follow. populated by other modules so they can handle them themselves

        self.youtubeKey = load_key(u'YouTube')
        self.imgurClientID = load_key(u'imgur Client ID')

        self.autoFollow = True

    def shouldExecute(self, message):
        """
        @type message: IRCMessage
        """
        # Only react to chat messages from users not on the ignore list.
        if message.Type not in self.acceptedTypes:
            return False
        if ignores.ignoreList is not None:
            if message.User.Name.lower() in ignores.ignoreList:
                return False

        return True

    def execute(self, message):
        """
        @type message: IRCMessage
        """
        # Explicit trigger toggles auto-follow or follows the given URL;
        # otherwise scan every message for a URL when auto-follow is on.
        match = None
        if message.Command.lower() in self.triggers:
            if message.ParameterList[0].lower() == 'on':
                self.autoFollow = True
                return IRCResponse(ResponseType.Say, 'Auto-follow on', message.ReplyTo)
            elif message.ParameterList[0].lower() == 'off':
                self.autoFollow = False
                return IRCResponse(ResponseType.Say, 'Auto-follow off', message.ReplyTo)
            else:
                match = re.search(r'(?P<url>(https?://|www\.)[^\s]+)', message.Parameters, re.IGNORECASE)
        elif self.autoFollow:
            match = re.search(r'(?P<url>(https?://|www\.)[^\s]+)', message.MessageString, re.IGNORECASE)

        if not match:
            return

        # Defer to other modules that registered patterns for this URL.
        for module, patterns in self.handledExternally.iteritems():
            for pattern in patterns:
                if re.search(pattern, message.MessageString):
                    return  # url will be handled by another module

        return self.DispatchToFollows(match.group('url'), message)

    def DispatchToFollows(self, url, message):
        """
        @type url: unicode
        @type message: IRCMessage
        """
        # Route the URL to a site-specific handler, falling back to a generic
        # title fetch for anything that is not a bare image link.
        youtubeMatch = re.search(r'(youtube\.com/watch.+v=|youtu\.be/)(?P<videoID>[^&#\?]{11})', url)
        imgurMatch = re.search(r'(i\.)?imgur\.com/(?P<imgurID>[^\.]+)', url)
        twitterMatch = re.search(r'twitter\.com/(?P<tweeter>[^/]+)/status(es)?/(?P<tweetID>[0-9]+)', url)
        steamMatch = re.search(r'store\.steampowered\.com/(?P<steamType>(app|sub))/(?P<steamID>[0-9]+)', url)
        ksMatch = re.search(r'kickstarter\.com/projects/(?P<ksID>[^/]+/[^/&#\?]+)', url)
        twitchMatch = re.search(r'twitch\.tv/(?P<twitchChannel>[^/]+)/?(\s|$)', url)

        if youtubeMatch:
            return self.FollowYouTube(youtubeMatch.group('videoID'), message)
        elif imgurMatch:
            return self.FollowImgur(imgurMatch.group('imgurID'), message)
        elif twitterMatch:
            return self.FollowTwitter(twitterMatch.group('tweeter'), twitterMatch.group('tweetID'), message)
        elif steamMatch:
            return self.FollowSteam(steamMatch.group('steamType'), steamMatch.group('steamID'), message)
        elif ksMatch:
            return self.FollowKickstarter(ksMatch.group('ksID'), message)
        elif twitchMatch:
            return self.FollowTwitch(twitchMatch.group('twitchChannel'), message)
        elif not re.search('\.(jpe?g|gif|png|bmp)$', url):
            return self.FollowStandard(url, message)

    def FollowYouTube(self, videoID, message):
        # Summarize a YouTube video (title / length / channel / description)
        # via the YouTube Data API v3.
        if self.youtubeKey is None:
            return IRCResponse(ResponseType.Say, '[YouTube API key not found]', message.ReplyTo)

        fields = 'items(id,snippet(title,description,channelTitle),contentDetails(duration))'
        parts = 'snippet,contentDetails'
        url = 'https://www.googleapis.com/youtube/v3/videos?id={}&fields={}&part={}&key={}'.format(videoID, fields, parts, self.youtubeKey)

        webPage = WebUtils.fetchURL(url)
        webPage.body = webPage.body.decode('utf-8')
        j = json.loads(webPage.body)

        if 'items' not in j:
            return None

        title = j['items'][0]['snippet']['title']
        description = j['items'][0]['snippet']['description']
        channel = j['items'][0]['snippet']['channelTitle']
        # ISO-8601 duration -> H:MM:SS (or MM:SS when under an hour).
        length = parse_duration(j["items"][0]["contentDetails"]["duration"]).total_seconds()
        m, s = divmod(int(length), 60)
        h, m = divmod(m, 60)
        if h > 0:
            length = u'{0:02d}:{1:02d}:{2:02d}'.format(h, m, s)
        else:
            length = u'{0:02d}:{1:02d}'.format(m, s)

        if not description:
            description = u'<no description available>'
        description = re.sub('(\n|\s)+', ' ', description)
        # Truncate long descriptions at a word boundary.
        limit = 150
        if len(description) > limit:
            description = u'{} ...'.format(description[:limit].rsplit(' ', 1)[0])

        return IRCResponse(ResponseType.Say,
                           self.graySplitter.join([title, length, channel, description]),
                           message.ReplyTo,
                           {'urlfollowURL': 'http://youtu.be/{}'.format(videoID)})

    def FollowImgur(self, imgurID, message):
        # Describe an imgur image or album via the imgur v3 API, falling back
        # to the gallery endpoint and finally page-title scraping for a title.
        if self.imgurClientID is None:
            return IRCResponse(ResponseType.Say, '[imgur Client ID not found]', message.ReplyTo)

        if imgurID.startswith('gallery/'):
            imgurID = imgurID.replace('gallery/', '')

        albumLink = False
        if imgurID.startswith('a/'):
            imgurID = imgurID.replace('a/', '')
            url = 'https://api.imgur.com/3/album/{0}'.format(imgurID)
            albumLink = True
        else:
            url = 'https://api.imgur.com/3/image/{0}'.format(imgurID)

        headers = [('Authorization', 'Client-ID {0}'.format(self.imgurClientID))]

        webPage = WebUtils.fetchURL(url, headers)

        if webPage is None:
            url = 'https://api.imgur.com/3/gallery/{0}'.format(imgurID)
            webPage = WebUtils.fetchURL(url, headers)

        if webPage is None:
            return

        response = json.loads(webPage.body)
        imageData = response['data']

        if imageData['title'] is None:
            url = 'https://api.imgur.com/3/gallery/{0}'.format(imgurID)
            webPage = WebUtils.fetchURL(url, headers)
            if webPage is not None:
                imageData = json.loads(webPage.body)['data']

            if imageData['title'] is None:
                # Last resort: scrape the page title.
                webPage = WebUtils.fetchURL('http://imgur.com/{0}'.format(imgurID))
                imageData['title'] = self.GetTitle(webPage.body).replace(' - Imgur', '')
                if imageData['title'] == 'imgur: the simple image sharer':
                    imageData['title'] = None

        data = []
        if imageData['title'] is not None:
            data.append(imageData['title'])
        else:
            data.append(u'<No Title>')
        if imageData['nsfw']:
            data.append(u'\x034\x02NSFW!\x0F')
        if albumLink:
            data.append(u'Album: {0} Images'.format(imageData['images_count']))
        else:
            if 'is_album' in imageData and imageData['is_album']:
                data.append(u'Album: {0:,d} Images'.format(len(imageData['images'])))
            else:
                if imageData[u'animated']:
                    data.append(u'\x032\x02Animated!\x0F')
                data.append(u'{0:,d}x{1:,d}'.format(imageData['width'], imageData['height']))
                data.append(u'Size: {0:,d}kb'.format(int(imageData['size'])/1024))
                data.append(u'Views: {0:,d}'.format(imageData['views']))

        return IRCResponse(ResponseType.Say,
                           self.graySplitter.join(data),
                           message.ReplyTo,
                           {'urlfollowURL': '[nope, imgur is too hard. also, pointless?]'})

    def FollowTwitter(self, tweeter, tweetID, message):
        # Scrape a tweet's permalink page and format "[time] user: text".
        webPage = WebUtils.fetchURL('https://twitter.com/{0}/status/{1}'.format(tweeter, tweetID))

        soup = BeautifulSoup(webPage.body)

        tweet = soup.find(class_='permalink-tweet')
        user = tweet.find(class_='username').text
        tweetText = tweet.find(class_='tweet-text')

        tweetTimeText = tweet.find(class_='client-and-actions').text.strip()
        try:
            tweetTimeText = time.strftime('%Y/%m/%d %H:%M', time.strptime(tweetTimeText, '%I:%M %p - %d %b %Y'))
        except ValueError:
            # Unexpected timestamp format; show the raw text instead.
            pass
        # Replace shortened links with their expanded targets.
        links = tweetText.find_all('a', {'data-expanded-url': True})
        for link in links:
            link.string = ' ' + link['data-expanded-url']
        embeddedLinks = tweetText.find_all('a', {'data-pre-embedded': 'true'})
        for link in embeddedLinks:
            link.string = ' ' + link['href']

        text = StringUtils.unescapeXHTML(tweetText.text)
        text = re.sub('[\r\n]+', self.graySplitter, text)

        formatString = unicode(assembleFormattedText(A.normal[A.fg.gray['[{0}]'], A.bold[' {1}:'], ' {2}']))

        return IRCResponse(ResponseType.Say,
                           formatString.format(tweetTimeText, user, text),
                           message.ReplyTo,
                           {'urlfollowURL': 'https://twitter.com/{}/status/{}'.format(tweeter, tweetID)})

    def FollowSteam(self, steamType, steamId, message):
        # Summarize a Steam store app or package via the storefront API:
        # name, contents, genres, release date, metacritic, prices, blurb.
        steamType = {'app': 'app', 'sub': 'package'}[steamType]
        webPage = WebUtils.fetchURL('http://store.steampowered.com/api/{0}details/?{0}ids={1}&cc=US&l=english&v=1'.format(steamType, steamId))

        response = json.loads(webPage.body)
        if not response[steamId]['success']:
            return  # failure

        appData = response[steamId]['data']

        data = []

        # name
        if 'developers' in appData:
            name = assembleFormattedText(A.normal[appData['name'], A.fg.gray[' by '], u', '.join(appData['developers'])])
        else:
            name = appData['name']
        data.append(name)

        # package contents (might need to trim this...)
        if 'apps' in appData:
            appNames = [app['name'] for app in appData['apps']]
            apps = u'Package containing: {}'.format(u', '.join(appNames))
            data.append(apps)

        # genres
        if 'genres' in appData:
            data.append(u'Genres: ' + ', '.join([genre['description'] for genre in appData['genres']]))

        # release date
        releaseDate = appData['release_date']
        if not releaseDate['coming_soon']:
            if releaseDate['date']:
                data.append(u'Release Date: ' + releaseDate['date'])
        else:
            data.append(assembleFormattedText(A.normal['Release Date: ', A.fg.cyan[str(releaseDate['date'])]]))

        # metacritic
        # http://www.metacritic.com/faq#item32 (Why is the breakdown of green, yellow, and red scores different for games?)
        if 'metacritic' in appData:
            metaScore = appData['metacritic']['score']
            if metaScore < 50:
                metacritic = assembleFormattedText(A.normal[A.fg.red[str(metaScore)]])
            elif metaScore < 75:
                metacritic = assembleFormattedText(A.normal[A.fg.yellow[str(metaScore)]])
            else:
                metacritic = assembleFormattedText(A.normal[A.fg.green[str(metaScore)]])
            data.append(u'Metacritic: {0}'.format(metacritic))

        # prices
        priceField = {'app': 'price_overview', 'package': 'price'}[steamType]
        if priceField in appData:
            prices = {'USD': appData[priceField],
                      'GBP': self.getSteamPrice(steamType, steamId, 'GB'),
                      'EUR': self.getSteamPrice(steamType, steamId, 'FR'),
                      'AUD': self.getSteamPrice(steamType, steamId, 'AU')}

            currencies = {'USD': u'$',
                          'GBP': u'\u00A3',
                          'EUR': u'\u20AC',
                          'AUD': u'AU$'}

            # AUD price matching USD means the region has no separate price.
            if not prices['AUD'] or prices['AUD']['final'] == prices['USD']['final']:
                del prices['AUD']

            # filter out any missing prices
            prices = {key: val for key, val in prices.iteritems() if val}

            priceString = u'/'.join([currencies[val['currency']] + unicode(val['final'] / 100.0) for val in prices.values()])
            if prices['USD']['discount_percent'] > 0:
                priceString += assembleFormattedText(A.normal[A.fg.green[' ({0}% sale!)'.format(prices['USD']['discount_percent'])]])

            data.append(priceString)

        # description
        if 'about_the_game' in appData and appData['about_the_game'] is not None:
            limit = 150
            description = re.sub(r'(<[^>]+>|[\r\n\t])+', assembleFormattedText(A.normal[' ', A.fg.gray['>'], ' ']), appData['about_the_game'])
            if len(description) > limit:
                description = u'{0} ...'.format(description[:limit].rsplit(' ', 1)[0])
            data.append(description)

        return IRCResponse(ResponseType.Say,
                           self.graySplitter.join(data),
                           message.ReplyTo,
                           {'urlfollowURL': 'http://store.steampowered.com/{}/{}'.format({'app': 'app', 'package': 'sub'}[steamType], steamId)})

    @classmethod
    def getSteamPrice(cls, appType, appId, region):
        # Fetch the regional price dict for an app/package; None-ish when the
        # storefront has no data for that region.
        webPage = WebUtils.fetchURL('http://store.steampowered.com/api/{0}details/?{0}ids={1}&cc={2}&l=english&v=1'.format(appType, appId, region))
        priceField = {'app': 'price_overview', 'package': 'price'}[appType]
        response = json.loads(webPage.body)

        if 'data' not in response[appId]:
            return

        # The AU store reports USD as its currency; relabel it.
        if region == 'AU':
            response[appId]['data'][priceField]['currency'] = 'AUD'

        return response[appId]['data'][priceField]

    def FollowKickstarter(self, ksID, message):
        # Scrape a Kickstarter project page for title, backers, pledge totals
        # and project state. Live and completed pages use different markup.
        webPage = WebUtils.fetchURL('https://www.kickstarter.com/projects/{}/description'.format(ksID))

        soup = BeautifulSoup(webPage.body)

        data = []

        shorturl = soup.find(rel='shorturl')['href']
        if shorturl is None:
            shorturl = 'https://www.kickstarter.com/projects/{}/'.format(ksID)

        title = soup.find(property='og:title')
        if title is not None:
            creator = soup.find(attrs={'data-modal-class': 'modal_project_by'})
            if creator is not None:
                data.append(unicode(assembleFormattedText(A.normal['{0}', A.fg.gray[' by '], '{1}'])).format(title['content'].strip(), creator.text.strip()))
            else:
                data.append(title['content'].strip())

        stats = soup.find(id='stats')

        # projects in progress
        if stats is not None:
            backerCount = stats.find(id='backers_count')
            if backerCount is not None:
                backerCount = int(backerCount['data-backers-count'])
        # completed projects
        else:
            backerCount = soup.find(class_='NS_projects__spotlight_stats')
            if backerCount is not None:
                backerCount = int(backerCount.b.text.strip().split()[0].replace(',', ''))

        data.append('Backers: {0:,}'.format(backerCount))

        if stats is not None:
            pledgeData = stats.find(id='pledged')
            if pledgeData is not None:
                pledged = float(pledgeData['data-pledged'])
                goal = float(pledgeData['data-goal'])
                percentage = float(pledgeData['data-percent-raised'])
                if backerCount > 0:
                    pledgePerBacker = pledged / backerCount
                else:
                    pledgePerBacker = 0
                currency = stats.find_all(attrs={'data-currency': True})[-1]['data-currency']
        else:
            money = soup.select('span.money.no-code')
            if money:
                pledgedString = money[0].text.strip()
                goalString = money[1].text.strip()
                pledged = float(re.sub(ur'[^0-9.]', u'', pledgedString))
                goal = float(re.sub(ur'[^0-9.]', u'', goalString))
                percentage = (pledged / goal)
                if backerCount > 0:
                    pledgePerBacker = pledged / backerCount
                else:
                    pledgePerBacker = 0
                currency = soup.select('span.money.no-code')[-1]['class']
                currency.remove('money')
                currency.remove('no-code')
                currency = currency[0].upper()

        # NOTE(review): `percentage` has different scales in the two branches
        # above (percent vs. fraction) — confirm against live pages.
        if percentage >= 1.0:
            percentageString = A.fg.green['({3:,.0f}% funded)']
        else:
            percentageString = A.fg.red['({3:,.0f}% funded)']

        pledgePerBackerString = A.fg.gray['{4:,.0f}/backer']

        pledgedString = assembleFormattedText(A.normal['Pledged: {0:,.0f}', A.fg.gray['/'], '{1:,.0f} {2} ', percentageString, ' ', pledgePerBackerString])
        data.append(pledgedString.format(pledged,
                                         goal,
                                         currency,  #pledgedData.data['data-currency'],
                                         percentage * 100,
                                         pledgePerBacker))

        findState = soup.find(id='main_content')
        if 'Project-state-canceled' in findState['class']:
            data.append(assembleFormattedText(A.normal[A.fg.red['Cancelled']]))
        elif 'Project-state-suspended' in findState['class']:
            data.append(assembleFormattedText(A.normal[A.fg.blue['Suspended']]))
        elif 'Project-state-failed' in findState['class']:
            data.append(assembleFormattedText(A.normal[A.fg.red['Failed']]))
        elif 'Project-state-successful' in findState['class']:
            data.append(assembleFormattedText(A.normal[A.fg.green['Successful']]))
        elif 'Project-state-live' in findState['class']:
            duration = stats.find(id='project_duration_data')
            if duration is not None:
                remaining = float(duration['data-hours-remaining'])
                days = math.floor(remaining/24)
                hours = remaining % 24
                data.append('Duration: {0:.0f} days {1:.1f} hours to go'.format(days, hours))

        return IRCResponse(ResponseType.Say,
                           self.graySplitter.join(data),
                           message.ReplyTo,
                           {'urlfollowURL': shorturl})

    def FollowTwitch(self, channel, message):
        # Heavily based on Didero's DideRobot code for the same
        # https://github.com/Didero/DideRobot/blob/06629fc3c8bddf8f729ce2d27742ff999dfdd1f6/commands/urlTitleFinder.py#L37
        # TODO: other stats?
        chanData = {}
        channelOnline = False
        twitchHeaders = [('Accept', 'application/vnd.twitchtv.v2+json')]
        webPage = WebUtils.fetchURL(u'https://api.twitch.tv/kraken/streams/{}'.format(channel), twitchHeaders)

        streamData = json.loads(webPage.body)

        if 'stream' in streamData and streamData['stream'] is not None:
            chanData = streamData['stream']['channel']
            channelOnline = True
        elif 'error' not in streamData:
            # Not live; fall back to the channel endpoint for metadata.
            webPage = WebUtils.fetchURL(u'https://api.twitch.tv/kraken/channels/{}'.format(channel), twitchHeaders)
            chanData = json.loads(webPage.body)

        if len(chanData) > 0:
            # Green name when live, red when offline.
            if channelOnline:
                channelInfo = assembleFormattedText(A.fg.green['']) + u'{}'.format(chanData['display_name']) + assembleFormattedText(A.normal[''])
            else:
                channelInfo = assembleFormattedText(A.fg.red['']) + u'{}'.format(chanData['display_name']) + assembleFormattedText(A.normal[''])
            channelInfo += u' "{}"'.format(re.sub(r'[\r\n]+', self.graySplitter, chanData['status'].strip()))
            if chanData['game'] is not None:
                channelInfo += assembleFormattedText(A.normal[A.fg.gray[', playing '], u'{}'.format(chanData['game'])])
            if chanData['mature']:
                channelInfo += assembleFormattedText(A.normal[A.fg.lightRed[' [Mature]']])
            if channelOnline:
                channelInfo += assembleFormattedText(A.normal[A.fg.green[' (Live with {0:,d} viewers)'.format(streamData['stream']['viewers'])]])
            else:
                channelInfo += assembleFormattedText(A.normal[A.fg.red[' (Offline)']])

            return IRCResponse(ResponseType.Say,
                               channelInfo,
                               message.ReplyTo,
                               {'urlfollowURL': 'https://twitch.tv/{}'.format(channel)})

    def FollowStandard(self, url, message):
        # Generic handler: fetch the page, re-dispatch if it redirected,
        # otherwise reply with the page title and domain.
        webPage = WebUtils.fetchURL(url)

        if webPage is None:
            return

        if webPage.responseUrl != url:
            return self.DispatchToFollows(webPage.responseUrl, message)

        title = self.GetTitle(webPage.body)
        if title is not None:
            return IRCResponse(ResponseType.Say,
                               u'{0} (at {1})'.format(title, webPage.domain),
                               message.ReplyTo,
                               {'urlfollowURL': url})

        return

    def GetTitle(self, webpage):
        # Extract and clean the <title> of an HTML document; None if absent.
        soup = BeautifulSoup(webpage)
        title = soup.title
        if title:
            title = title.text
            title = re.sub(u'[\r\n]+', u'', title)  # strip any newlines
            title = title.strip()  # strip all whitespace either side
            title = re.sub(u'\s+', u' ', title)  # replace multiple whitespace chars with a single space
            title = self.htmlParser.unescape(title)  # unescape html entities

            # Split on the first space before 300 characters, and replace the rest with '...'
            if len(title) > 300:
                title = title[:300].rsplit(u' ', 1)[0] + u" ..."

            return title

        return None
# coding=utf-8
from collections import defaultdict
import datetime
import decimal
import logging
from json import dumps, loads, JSONEncoder

from django.conf import settings
from django.core import serializers
from django.http import HttpResponse, Http404
from django.utils.crypto import constant_time_compare
from django.utils.decorators import method_decorator, classonlymethod
from django.utils.importlib import import_module
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import FormView
from django.db.models.query import QuerySet, ValuesQuerySet
from django.utils.functional import curry, Promise

from .compat import force_u
from .models import APIKey
from .utils import get_pairs_sign, prepare_uuid_string

LOG = logging.getLogger('formapi')


def autodiscover():
    """Import every installed app's `calls` module so its API calls register.

    Apps without a `calls` module are silently skipped.
    """
    for app in settings.INSTALLED_APPS:
        try:
            import_module('%s.calls' % app)
        except ImportError:
            continue


class AddHeaderAdapter(logging.LoggerAdapter):
    """LoggerAdapter that prefixes every message with `extra['header']`."""

    def process(self, msg, kwargs):
        msg = ' '.join((self.extra.get('header'), msg))
        return msg, kwargs


class DjangoJSONEncoder(JSONEncoder):
    """JSONEncoder extended with Django/stdlib types.

    Handles datetimes (via `default_date`), Decimal, querysets and lazy
    translation strings (`Promise`).
    """

    def default(self, obj):
        date_obj = self.default_date(obj)
        if date_obj is not None:
            return date_obj
        elif isinstance(obj, decimal.Decimal):
            # Serialized as string to avoid float precision loss.
            return str(obj)
        elif isinstance(obj, ValuesQuerySet):
            return list(obj)
        elif isinstance(obj, QuerySet):
            # Round-trip through Django's JSON serializer to get plain dicts.
            return loads(serializers.serialize('json', obj))
        elif isinstance(obj, Promise):
            return force_u(obj)
        return JSONEncoder.default(self, obj)

    def default_date(self, obj):
        """Return an ISO-style representation for date/time-like objects.

        Returns None when `obj` is not date-like so `default` can fall
        through to the other type checks.
        """
        if isinstance(obj, datetime.datetime):
            r = obj.isoformat()
            if obj.microsecond:
                # Truncate microseconds to milliseconds.
                r = r[:23] + r[26:]
            if r.endswith('+00:00'):
                r = r[:-6] + 'Z'
            return r
        elif isinstance(obj, datetime.date):
            return obj.isoformat()
        elif isinstance(obj, datetime.time):
            if obj.tzinfo is not None and obj.tzinfo.utcoffset(obj) is not None:
                raise ValueError("JSON can't represent timezone-aware times.")
            r = obj.isoformat()
            if obj.microsecond:
                r = r[:12]
            return r
        elif isinstance(obj, datetime.timedelta):
            # NOTE(review): `.seconds` ignores the days component — returns
            # only the within-day seconds; confirm this is intended.
            return obj.seconds


# Module-level `dumps` now always uses the encoder above.
dumps = curry(dumps, cls=DjangoJSONEncoder)


class API(FormView):
    """Form-based API endpoint dispatching to registered call classes.

    Calls are registered per (version, namespace, name) via `register` and
    looked up from URL kwargs in `dispatch`. Responses are JSON; requests
    may require an HMAC-style signature (`signed_requests`).
    """
    template_name = 'formapi/api/form.html'
    signed_requests = True
    # version -> namespace -> call name -> call (form) class
    call_mapping = defaultdict(lambda: defaultdict(dict))

    @classmethod
    def register(cls, call_cls, namespace, name=None, version='beta'):
        # Name defaults to the call class's own name.
        call_name = name or call_cls.__name__
        API.call_mapping[version][namespace][call_name] = call_cls

    @classonlymethod
    def as_view(cls, **initkwargs):
        # Make sure all apps' calls are registered before serving.
        autodiscover()
        return super(API, cls).as_view(**initkwargs)

    def get_form_class(self):
        # Unknown version/namespace/call means there is no such endpoint.
        try:
            return API.call_mapping[self.version][self.namespace][self.call]
        except KeyError:
            raise Http404

    def get_form_kwargs(self):
        kwargs = super(API, self).get_form_kwargs()
        if self.api_key:
            kwargs['api_key'] = self.api_key
        return kwargs

    def get_access_params(self):
        # API key and request signature from GET or POST parameters.
        key = self.request.REQUEST.get('key')
        sign = self.request.REQUEST.get('sign')
        return key, sign

    def sign_ok(self, sign):
        # Recompute the expected signature over the normalized parameters and
        # compare in constant time to avoid timing attacks.
        digest = get_pairs_sign(secret=self.api_key.secret,
                                sorted_pairs=self.normalized_parameters())
        digest = prepare_uuid_string(digest)
        sign = prepare_uuid_string(sign)
        return constant_time_compare(sign, digest)

    def normalized_parameters(self):
        """
        Normalize django request to key value pairs sorted by key first and then value
        """
        for field in sorted(self.get_form(self.get_form_class()).fields.keys()):
            for item in sorted(self.request.REQUEST.getlist(field) or []):
                if item is not None:
                    yield field, item

    def render_to_json_response(self, context, **response_kwargs):
        # Serialize `context` with DjangoJSONEncoder (module-level `dumps`).
        data = dumps(context)
        response_kwargs['content_type'] = 'application/json'
        return HttpResponse(data, **response_kwargs)

    def form_valid(self, form):
        # Run the call's action; `test_call` marks requests from test keys.
        self.log.info('Valid form received')
        test_call = False
        if self.api_key:
            test_call = self.api_key.test
        data = form.action(test_call)
        response_data = {
            'success': not bool(len(form.errors)),
            'errors': form.errors,
            'data': data
        }
        return self.render_to_json_response(response_data)

    def form_invalid(self, form):
        # Validation failure -> HTTP 400 with the form's errors.
        self.log.info('Invalid form received')
        response_data = {
            'success': False,
            'errors': form.errors,
            'data': False
        }
        return self.render_to_json_response(response_data, status=400)

    def get_log_header(self):
        # "[remote addr][method][api key]" prefix, computed once per request.
        if not hasattr(self, 'log_header'):
            key = getattr(self, 'api_key', None)
            self.log_header = '[%s][%s][%s]' % (
                self.request.META['REMOTE_ADDR'],
                self.request.META['REQUEST_METHOD'],
                key.key if key else 'unknown')
        return self.log_header

    def setup_log(self, log):
        self.log = AddHeaderAdapter(log, {'header': self.get_log_header()})

    def authorize(self):
        # Calls may opt out of signing via their own `signed_requests` attr.
        if getattr(self.get_form_class(), 'signed_requests', API.signed_requests):
            key, sign = self.get_access_params()
            ### Check for not revoked api key
            try:
                self.api_key = APIKey.objects.get(key=key, revoked=False)
            except APIKey.DoesNotExist:
                return False
            ### Check request signature
            return self.sign_ok(sign)
        return True

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # Set up request
        self.request = request
        # Set up form class
        self.version = kwargs['version']
        self.namespace = kwargs['namespace']
        self.call = kwargs['call']
        # Check access params
        self.api_key = None
        access_granted = self.authorize()
        # Setup logging to add header
        self.setup_log(LOG)
        # Authorize request
        if access_granted:
            self.log.info('Access Granted %s', self.request.REQUEST)
            return super(API, self).dispatch(request, *args, **kwargs)
        # Access denied
        self.log.warning('Access Denied %s', self.request.REQUEST)
        return HttpResponse(status=401)
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from src.test.py.bazel import test_base


class BazelWindowsCppTest(test_base.TestBase):
  """Integration tests for building C++ DLLs/binaries with Bazel on Windows:
  symbol export (DEF files), dynamic vs. static linking, MSVCRT selection,
  and clang-cl toolchain selection."""

  def createProjectFiles(self):
    """Scratch a workspace with cc_library A (self-exporting via dllexport),
    cc_library B (needs a generated DEF file), and cc_binary C linking both
    dynamically."""
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('BUILD', [
        'package(',
        '  default_visibility = ["//visibility:public"],',
        '  features=["windows_export_all_symbols"]',
        ')',
        '',
        'cc_library(',
        '  name = "A",',
        '  srcs = ["a.cc"],',
        '  hdrs = ["a.h"],',
        '  copts = ["/DCOMPILING_A_DLL"],',
        '  features = ["no_windows_export_all_symbols"],',
        ')',
        '',
        'cc_library(',
        '  name = "B",',
        '  srcs = ["b.cc"],',
        '  hdrs = ["b.h"],',
        '  deps = [":A"],',
        '  copts = ["/DNO_DLLEXPORT"],',
        ')',
        '',
        'cc_binary(',
        '  name = "C",',
        '  srcs = ["c.cc"],',
        '  deps = [":A", ":B" ],',
        '  linkstatic = 0,',
        ')',
    ])

    self.ScratchFile('a.cc', [
        '#include <stdio.h>',
        '#include "a.h"',
        'int a = 0;',
        'void hello_A() {',
        '  a++;',
        '  printf("Hello A, %d\\n", a);',
        '}',
    ])

    self.ScratchFile('b.cc', [
        '#include <stdio.h>',
        '#include "a.h"',
        '#include "b.h"',
        'void hello_B() {',
        '  hello_A();',
        '  printf("Hello B\\n");',
        '}',
    ])

    # Shared header template; DLLEXPORT expands depending on whether the
    # translation unit is compiling the DLL, importing it, or exporting
    # nothing at all (NO_DLLEXPORT).
    header_temp = [
        '#ifndef %{name}_H',
        '#define %{name}_H',
        '',
        '#if NO_DLLEXPORT',
        '  #define DLLEXPORT',
        '#elif COMPILING_%{name}_DLL',
        '  #define DLLEXPORT __declspec(dllexport)',
        '#else',
        '  #define DLLEXPORT __declspec(dllimport)',
        '#endif',
        '',
        'DLLEXPORT void hello_%{name}();',
        '',
        '#endif',
    ]
    self.ScratchFile('a.h',
                     [line.replace('%{name}', 'A') for line in header_temp])
    self.ScratchFile('b.h',
                     [line.replace('%{name}', 'B') for line in header_temp])

    c_cc_content = [
        '#include <stdio.h>',
        '#include "a.h"',
        '#include "b.h"',
        '',
        'void hello_C() {',
        '  hello_A();',
        '  hello_B();',
        '  printf("Hello C\\n");',
        '}',
        '',
        'int main() {',
        '  hello_C();',
        '  return 0;',
        '}',
    ]

    self.ScratchFile('c.cc', c_cc_content)

    # A second //lib:A used by conflict tests (same DLL basename as //:A).
    self.ScratchFile('lib/BUILD', [
        'cc_library(',
        '  name = "A",',
        '  srcs = ["dummy.cc"],',
        '  features = ["windows_export_all_symbols"],',
        '  visibility = ["//visibility:public"],',
        ')',
    ])
    self.ScratchFile('lib/dummy.cc', ['void dummy() {}'])

    self.ScratchFile('main/main.cc', c_cc_content)

  def getBazelInfo(self, info_key):
    """Return the first line of `bazel info <info_key>`."""
    exit_code, stdout, stderr = self.RunBazel(['info', info_key])
    self.AssertExitCode(exit_code, 0, stderr)
    return stdout[0]

  def testBuildDynamicLibraryWithUserExportedSymbol(self):
    self.createProjectFiles()
    bazel_bin = self.getBazelInfo('bazel-bin')

    # //:A export symbols by itself using __declspec(dllexport), so it doesn't
    # need Bazel to export symbols using DEF file.
    exit_code, _, stderr = self.RunBazel(
        ['build', '//:A', '--output_groups=dynamic_library'])
    self.AssertExitCode(exit_code, 0, stderr)

    # TODO(pcloudy): change suffixes to .lib and .dll after making DLL
    # extensions correct on Windows.
    import_library = os.path.join(bazel_bin, 'A.if.lib')
    shared_library = os.path.join(bazel_bin, 'A.dll')
    empty_def_file = os.path.join(bazel_bin, 'A.gen.empty.def')

    self.assertTrue(os.path.exists(import_library))
    self.assertTrue(os.path.exists(shared_library))
    # An empty DEF file should be generated for //:A
    self.assertTrue(os.path.exists(empty_def_file))

  def testBuildDynamicLibraryWithExportSymbolFeature(self):
    self.createProjectFiles()
    bazel_bin = self.getBazelInfo('bazel-bin')

    # //:B doesn't export symbols by itself, so it need Bazel to export symbols
    # using DEF file.
    exit_code, _, stderr = self.RunBazel(
        ['build', '//:B', '--output_groups=dynamic_library'])
    self.AssertExitCode(exit_code, 0, stderr)

    # TODO(pcloudy): change suffixes to .lib and .dll after making DLL
    # extensions correct on Windows.
    import_library = os.path.join(bazel_bin, 'B.if.lib')
    shared_library = os.path.join(bazel_bin, 'B.dll')
    def_file = os.path.join(bazel_bin, 'B.gen.def')
    self.assertTrue(os.path.exists(import_library))
    self.assertTrue(os.path.exists(shared_library))
    # DEF file should be generated for //:B
    self.assertTrue(os.path.exists(def_file))

    # Test build //:B if windows_export_all_symbols feature is disabled by
    # no_windows_export_all_symbols.
    exit_code, _, stderr = self.RunBazel([
        'build', '//:B', '--output_groups=dynamic_library',
        '--features=no_windows_export_all_symbols'
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    import_library = os.path.join(bazel_bin, 'B.if.lib')
    shared_library = os.path.join(bazel_bin, 'B.dll')
    empty_def_file = os.path.join(bazel_bin, 'B.gen.empty.def')
    self.assertTrue(os.path.exists(import_library))
    self.assertTrue(os.path.exists(shared_library))
    # An empty DEF file should be generated for //:B
    self.assertTrue(os.path.exists(empty_def_file))
    self.AssertFileContentNotContains(empty_def_file, 'hello_B')

  def testBuildCcBinaryWithDependenciesDynamicallyLinked(self):
    self.createProjectFiles()
    bazel_bin = self.getBazelInfo('bazel-bin')

    # Since linkstatic=0 is specified for //:C, it's dependencies should be
    # dynamically linked.
    exit_code, _, stderr = self.RunBazel(['build', '//:C'])
    self.AssertExitCode(exit_code, 0, stderr)

    # TODO(pcloudy): change suffixes to .lib and .dll after making DLL
    # extensions correct on
    # Windows.
    # a_import_library
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.if.lib')))
    # a_shared_library
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.dll')))
    # a_def_file
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.gen.empty.def')))
    # b_import_library
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.if.lib')))
    # b_shared_library
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.dll')))
    # b_def_file
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.gen.def')))
    # c_exe
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'C.exe')))

  def testBuildCcBinaryFromDifferentPackage(self):
    self.createProjectFiles()
    self.ScratchFile('main/BUILD', [
        'cc_binary(',
        '  name = "main",',
        '  srcs = ["main.cc"],',
        '  deps = ["//:B"],',
        '  linkstatic = 0,'
        ')',
    ])
    bazel_bin = self.getBazelInfo('bazel-bin')

    exit_code, _, stderr = self.RunBazel(['build', '//main:main'])
    self.AssertExitCode(exit_code, 0, stderr)

    # Test if A.dll and B.dll are copied to the directory of main.exe
    main_bin = os.path.join(bazel_bin, 'main/main.exe')
    self.assertTrue(os.path.exists(main_bin))
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/A.dll')))
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/B.dll')))

    # Run the binary to see if it runs successfully
    exit_code, stdout, stderr = self.RunProgram([main_bin])
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'],
                     stdout)

  def testBuildCcBinaryDependsOnConflictDLLs(self):
    self.createProjectFiles()
    self.ScratchFile(
        'main/BUILD',
        [
            'cc_binary(',
            '  name = "main",',
            '  srcs = ["main.cc"],',
            '  deps = ["//:B", "//lib:A"],',  # Transitively depends on //:A
            '  linkstatic = 0,'
            ')',
        ])

    # //main:main depends on both //lib:A and //:A,
    # their dlls are both called A.dll,
    # so there should be a conflict error
    exit_code, _, stderr = self.RunBazel(['build', '//main:main'])
    self.AssertExitCode(exit_code, 1, stderr)
    self.assertIn(
        'ERROR: file \'main/A.dll\' is generated by these conflicting '
        'actions:', ''.join(stderr))

  def testBuildDifferentCcBinariesDependOnConflictDLLs(self):
    self.createProjectFiles()
    self.ScratchFile(
        'main/BUILD',
        [
            'cc_binary(',
            '  name = "main",',
            '  srcs = ["main.cc"],',
            '  deps = ["//:B"],',  # Transitively depends on //:A
            '  linkstatic = 0,'
            ')',
            '',
            'cc_binary(',
            '  name = "other_main",',
            '  srcs = ["other_main.cc"],',
            '  deps = ["//lib:A"],',
            '  linkstatic = 0,'
            ')',
        ])
    self.ScratchFile('main/other_main.cc', ['int main() {return 0;}'])

    # Building //main:main should succeed
    exit_code, _, stderr = self.RunBazel(['build', '//main:main'])
    self.AssertExitCode(exit_code, 0, stderr)

    # Building //main:other_main *and* //main:main should fail
    exit_code, _, stderr = self.RunBazel(
        ['build', '//main:main', '//main:other_main'])
    self.AssertExitCode(exit_code, 1, stderr)
    self.assertIn(
        'ERROR: file \'main/A.dll\' is generated by these conflicting '
        'actions:', ''.join(stderr))

  def testDLLIsCopiedFromExternalRepo(self):
    self.ScratchFile('ext_repo/WORKSPACE')
    self.ScratchFile('ext_repo/BUILD', [
        'cc_library(',
        '  name = "A",',
        '  srcs = ["a.cc"],',
        '  features = ["windows_export_all_symbols"],',
        '  visibility = ["//visibility:public"],',
        ')',
    ])
    self.ScratchFile('ext_repo/a.cc', [
        '#include <stdio.h>',
        'void hello_A() {',
        '  printf("Hello A\\n");',
        '}',
    ])
    self.ScratchFile('WORKSPACE', [
        'local_repository(',
        '  name = "ext_repo",',
        '  path = "ext_repo",',
        ')',
    ])
    self.ScratchFile('BUILD', [
        'cc_binary(',
        '  name = "main",',
        '  srcs = ["main.cc"],',
        '  deps = ["@ext_repo//:A"],',
        '  linkstatic = 0,',
        ')',
    ])
    self.ScratchFile('main.cc', [
        'extern void hello_A();',
        '',
        'int main() {',
        '  hello_A();',
        '  return 0;',
        '}',
    ])

    bazel_bin = self.getBazelInfo('bazel-bin')

    exit_code, _, stderr = self.RunBazel(['build', '//:main'])
    self.AssertExitCode(exit_code, 0, stderr)

    # Test if A.dll is copied to the directory of main.exe
    main_bin = os.path.join(bazel_bin, 'main.exe')
    self.assertTrue(os.path.exists(main_bin))
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.dll')))

    # Run the binary to see if it runs successfully
    exit_code, stdout, stderr = self.RunProgram([main_bin])
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertEqual(['Hello A'], stdout)

  def testDynamicLinkingMSVCRT(self):
    self.createProjectFiles()
    bazel_output = self.getBazelInfo('output_path')

    # By default, it should link to msvcrt dynamically.
    exit_code, _, stderr = self.RunBazel(
        ['build', '//:A', '--output_groups=dynamic_library', '-s'])
    paramfile = os.path.join(
        bazel_output, 'x64_windows-fastbuild/bin/A.dll-2.params')
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertIn('/MD', ''.join(stderr))
    self.AssertFileContentContains(paramfile, '/DEFAULTLIB:msvcrt.lib')
    self.assertNotIn('/MT', ''.join(stderr))
    self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:libcmt.lib')

    # Test build in debug mode.
    exit_code, _, stderr = self.RunBazel(
        ['build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library', '-s'])
    paramfile = os.path.join(bazel_output, 'x64_windows-dbg/bin/A.dll-2.params')
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertIn('/MDd', ''.join(stderr))
    self.AssertFileContentContains(paramfile, '/DEFAULTLIB:msvcrtd.lib')
    self.assertNotIn('/MTd', ''.join(stderr))
    self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:libcmtd.lib')

  def testStaticLinkingMSVCRT(self):
    self.createProjectFiles()
    bazel_output = self.getBazelInfo('output_path')

    # With static_link_msvcrt feature, it should link to msvcrt statically.
    exit_code, _, stderr = self.RunBazel([
        'build', '//:A', '--output_groups=dynamic_library',
        '--features=static_link_msvcrt', '-s'
    ])
    paramfile = os.path.join(
        bazel_output, 'x64_windows-fastbuild/bin/A.dll-2.params')
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertNotIn('/MD', ''.join(stderr))
    self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:msvcrt.lib')
    self.assertIn('/MT', ''.join(stderr))
    self.AssertFileContentContains(paramfile, '/DEFAULTLIB:libcmt.lib')

    # Test build in debug mode.
    exit_code, _, stderr = self.RunBazel([
        'build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library',
        '--features=static_link_msvcrt', '-s'
    ])
    paramfile = os.path.join(bazel_output, 'x64_windows-dbg/bin/A.dll-2.params')
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertNotIn('/MDd', ''.join(stderr))
    self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:msvcrtd.lib')
    self.assertIn('/MTd', ''.join(stderr))
    self.AssertFileContentContains(paramfile, '/DEFAULTLIB:libcmtd.lib')

  def testBuildSharedLibraryFromCcBinaryWithStaticLink(self):
    self.createProjectFiles()
    self.ScratchFile(
        'main/BUILD',
        [
            'cc_binary(',
            '  name = "main.dll",',
            '  srcs = ["main.cc"],',
            '  deps = ["//:B"],',  # Transitively depends on //:A
            '  linkstatic = 1,'
            '  linkshared = 1,'
            '  features=["windows_export_all_symbols"]',
            ')',
        ])
    bazel_bin = self.getBazelInfo('bazel-bin')

    exit_code, _, stderr = self.RunBazel([
        'build', '//main:main.dll',
        '--output_groups=default,runtime_dynamic_libraries,interface_library'
    ])
    self.AssertExitCode(exit_code, 0, stderr)

    main_library = os.path.join(bazel_bin, 'main/main.dll')
    main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib')
    def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
    self.assertTrue(os.path.exists(main_library))
    self.assertTrue(os.path.exists(main_interface))
    self.assertTrue(os.path.exists(def_file))

    # A.dll and B.dll should not be copied.
    self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/A.dll')))
    self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/B.dll')))
    self.AssertFileContentContains(def_file, 'hello_A')
    self.AssertFileContentContains(def_file, 'hello_B')
    self.AssertFileContentContains(def_file, 'hello_C')

  def testBuildSharedLibraryFromCcBinaryWithDynamicLink(self):
    self.createProjectFiles()
    self.ScratchFile(
        'main/BUILD',
        [
            'cc_binary(',
            '  name = "main.dll",',
            '  srcs = ["main.cc"],',
            '  deps = ["//:B"],',  # Transitively depends on //:A
            '  linkstatic = 0,'
            '  linkshared = 1,'
            '  features=["windows_export_all_symbols"]',
            ')',
            '',
            'genrule(',
            '  name = "renamed_main",',
            '  srcs = [":main.dll"],',
            '  outs = ["main_renamed.dll"],',
            '  cmd = "cp $< $@",',
            ')',
        ])
    bazel_bin = self.getBazelInfo('bazel-bin')

    exit_code, _, stderr = self.RunBazel([
        'build', '//main:main.dll',
        '--output_groups=default,runtime_dynamic_libraries,interface_library'
    ])
    self.AssertExitCode(exit_code, 0, stderr)

    main_library = os.path.join(bazel_bin, 'main/main.dll')
    main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib')
    def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
    self.assertTrue(os.path.exists(main_library))
    self.assertTrue(os.path.exists(main_interface))
    self.assertTrue(os.path.exists(def_file))

    # A.dll and B.dll should be built and copied because they belong to
    # runtime_dynamic_libraries output group.
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/A.dll')))
    self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/B.dll')))

    # hello_A and hello_B should not be exported.
    self.AssertFileContentNotContains(def_file, 'hello_A')
    self.AssertFileContentNotContains(def_file, 'hello_B')
    self.AssertFileContentContains(def_file, 'hello_C')

    # The copy should succeed since //main:main.dll is only supposed to refer to
    # main.dll, A.dll and B.dll should be in a separate output group.
    exit_code, _, stderr = self.RunBazel(['build', '//main:renamed_main'])
    self.AssertExitCode(exit_code, 0, stderr)

  def testGetDefFileOfSharedLibraryFromCcBinary(self):
    self.createProjectFiles()
    self.ScratchFile(
        'main/BUILD',
        [
            'cc_binary(',
            '  name = "main.dll",',
            '  srcs = ["main.cc"],',
            '  deps = ["//:B"],',  # Transitively depends on //:A
            '  linkstatic = 1,'
            '  linkshared = 1,'
            ')',
        ])
    bazel_bin = self.getBazelInfo('bazel-bin')

    exit_code, _, stderr = self.RunBazel(
        ['build', '//main:main.dll', '--output_groups=def_file'])
    self.AssertExitCode(exit_code, 0, stderr)

    # Although windows_export_all_symbols is not specified for this target,
    # we should still be able to get the DEF file by def_file output group.
    def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
    self.assertTrue(os.path.exists(def_file))
    self.AssertFileContentContains(def_file, 'hello_A')
    self.AssertFileContentContains(def_file, 'hello_B')
    self.AssertFileContentContains(def_file, 'hello_C')

  def testBuildSharedLibraryWithoutAnySymbolExported(self):
    self.createProjectFiles()
    self.ScratchFile('BUILD', [
        'cc_binary(',
        '  name = "A.dll",',
        '  srcs = ["a.cc", "a.h"],',
        '  copts = ["/DNO_DLLEXPORT"],',
        '  linkshared = 1,'
        ')',
    ])
    bazel_bin = self.getBazelInfo('bazel-bin')

    exit_code, _, stderr = self.RunBazel(['build', '//:A.dll'])
    self.AssertExitCode(exit_code, 0, stderr)

    # Although windows_export_all_symbols is not specified for this target,
    # we should still be able to build a DLL without any symbol exported.
    empty_def_file = os.path.join(bazel_bin, 'A.dll.gen.empty.def')
    self.assertTrue(os.path.exists(empty_def_file))
    self.AssertFileContentNotContains(empty_def_file, 'hello_A')

  def testUsingDefFileGeneratedFromCcLibrary(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('lib_A.cc', ['void hello_A() {}'])
    self.ScratchFile('lib_B.cc', ['void hello_B() {}'])
    self.ScratchFile('BUILD', [
        'cc_library(',
        '  name = "lib_A",',
        '  srcs = ["lib_A.cc"],',
        ')',
        '',
        'cc_library(',
        '  name = "lib_B",',
        '  srcs = ["lib_B.cc"],',
        '  deps = [":lib_A"]',
        ')',
        '',
        'filegroup(',
        '  name = "lib_B_symbols",',
        '  srcs = [":lib_B"],',
        '  output_group = "def_file",',
        ')',
        '',
        'cc_binary(',
        '  name = "lib.dll",',
        '  deps = [":lib_B"],',
        '  win_def_file = ":lib_B_symbols",',
        '  linkshared = 1,',
        ')',
    ])

    # Test specifying DEF file in cc_binary
    bazel_bin = self.getBazelInfo('bazel-bin')
    exit_code, _, stderr = self.RunBazel(['build', '//:lib.dll', '-s'])
    self.AssertExitCode(exit_code, 0, stderr)
    def_file = bazel_bin + '/lib_B.gen.def'
    self.assertTrue(os.path.exists(def_file))
    # hello_A should not be exported
    self.AssertFileContentNotContains(def_file, 'hello_A')
    # hello_B should be exported
    self.AssertFileContentContains(def_file, 'hello_B')

  def testWinDefFileAttribute(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('lib.cc', ['void hello() {}'])
    self.ScratchFile('my_lib.def', [
        'EXPORTS',
        '        ?hello@@YAXXZ',
    ])
    self.ScratchFile('BUILD', [
        'cc_library(',
        '  name = "lib",',
        '  srcs = ["lib.cc"],',
        '  win_def_file = "my_lib.def",',
        ')',
        '',
        'cc_binary(',
        '  name = "lib_dy.dll",',
        '  srcs = ["lib.cc"],',
        '  win_def_file = "my_lib.def",',
        '  linkshared = 1,',
        ')',
    ])

    # Test exporting symbols using custom DEF file in cc_library.
    # Auto-generating DEF file should be disabled when custom DEF file specified
    exit_code, _, stderr = self.RunBazel([
        'build', '//:lib', '-s', '--output_groups=dynamic_library',
        '--features=windows_export_all_symbols'
    ])
    self.AssertExitCode(exit_code, 0, stderr)

    bazel_bin = self.getBazelInfo('bazel-bin')
    lib_if = os.path.join(bazel_bin, 'lib.if.lib')
    lib_def = os.path.join(bazel_bin, 'lib.gen.def')
    self.assertTrue(os.path.exists(lib_if))
    self.assertFalse(os.path.exists(lib_def))

    # Test specifying DEF file in cc_binary
    exit_code, _, stderr = self.RunBazel(['build', '//:lib_dy.dll', '-s'])
    self.AssertExitCode(exit_code, 0, stderr)
    filepath = bazel_bin + '/lib_dy.dll-2.params'
    with open(filepath, 'r', encoding='latin-1') as param_file:
      self.assertIn('/DEF:my_lib.def', param_file.read())

  def testCcImportRule(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('BUILD', [
        'cc_import(',
        '  name = "a_import",',
        '  static_library = "A.lib",',
        '  shared_library = "A.dll",',
        '  interface_library = "A.if.lib",',
        '  hdrs = ["a.h"],',
        '  alwayslink = 1,',
        ')',
    ])
    exit_code, _, stderr = self.RunBazel([
        'build', '//:a_import',
    ])
    self.AssertExitCode(exit_code, 0, stderr)

  def testCppErrorShouldBeVisible(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('BUILD', [
        'cc_binary(',
        '  name = "bad",',
        '  srcs = ["bad.cc"],',
        ')',
    ])
    self.ScratchFile('bad.cc', [
        'int main(int argc, char** argv) {',
        '  this_is_an_error();',
        '}',
    ])
    exit_code, stdout, stderr = self.RunBazel(['build', '//:bad'])
    self.AssertExitCode(exit_code, 1, stderr)
    self.assertIn('this_is_an_error', ''.join(stdout))

  def testBuildWithClangClByCompilerFlag(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('BUILD', [
        'cc_binary(',
        '  name = "main",',
        '  srcs = ["main.cc"],',
        ')',
    ])
    self.ScratchFile('main.cc', [
        'int main() {',
        '  return 0;',
        '}',
    ])
    exit_code, _, stderr = self.RunBazel([
        'build', '-s', '--compiler=clang-cl',
        '--incompatible_enable_cc_toolchain_resolution=false', '//:main'
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertIn('clang-cl.exe', ''.join(stderr))

  def testBuildWithClangClByToolchainResolution(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE', [
        'register_execution_platforms(',
        '  ":windows_clang"',
        ')',
        '',
        'register_toolchains(',
        '  "@local_config_cc//:cc-toolchain-x64_windows-clang-cl",',
        ')',
    ])
    self.ScratchFile('BUILD', [
        'platform(',
        '  name = "windows_clang",',
        '  constraint_values = [',
        '    "@platforms//cpu:x86_64",',
        '    "@platforms//os:windows",',
        '    "@bazel_tools//tools/cpp:clang-cl",',
        '  ]',
        ')',
        '',
        'cc_binary(',
        '  name = "main",',
        '  srcs = ["main.cc"],',
        ')',
    ])
    self.ScratchFile('main.cc', [
        'int main() {',
        '  return 0;',
        '}',
    ])
    exit_code, _, stderr = self.RunBazel([
        'build', '-s', '--incompatible_enable_cc_toolchain_resolution=true',
        '//:main'
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertIn('clang-cl.exe', ''.join(stderr))

  def createSimpleCppWorkspace(self, name):
    """Scratch a minimal one-library C++ workspace named ``name`` and
    return its directory."""
    work_dir = self.ScratchDir(name)
    self.ScratchFile(name + '/WORKSPACE', ['workspace(name = "%s")' % name])
    self.ScratchFile(
        name + '/BUILD',
        ['cc_library(name = "lib", srcs = ["lib.cc"], hdrs = ["lib.h"])'])
    self.ScratchFile(name + '/lib.h', ['void hello();'])
    self.ScratchFile(name + '/lib.cc', ['#include "lib.h"', 'void hello() {}'])
    return work_dir

  # Regression test for https://github.com/bazelbuild/bazel/issues/9172
  def testCacheBetweenWorkspaceWithDifferentNames(self):
    cache_dir = self.ScratchDir('cache')
    dir_a = self.createSimpleCppWorkspace('A')
    dir_b = self.createSimpleCppWorkspace('B')
    exit_code, _, stderr = self.RunBazel(
        ['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_a)
    self.AssertExitCode(exit_code, 0, stderr)
    exit_code, _, stderr = self.RunBazel(
        ['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_b)
    self.AssertExitCode(exit_code, 0, stderr)

  # Regression test for https://github.com/bazelbuild/bazel/issues/9321
  def testCcCompileWithTreeArtifactAsSource(self):
    self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
    self.ScratchFile('BUILD', [
        'load(":genccs.bzl", "genccs")',
        '',
        'genccs(',
        '  name = "gen_tree",',
        ')',
        '',
        'cc_library(',
        '  name = "main",',
        '  srcs = [ "gen_tree" ]',
        ')',
        '',
        'cc_binary(',
        '  name = "genccs",',
        '  srcs = [ "genccs.cpp" ],',
        ')',
    ])
    self.ScratchFile('genccs.bzl', [
        'def _impl(ctx):',
        '  tree = ctx.actions.declare_directory(ctx.attr.name + ".cc")',
        '  ctx.actions.run(',
        '    inputs = [],',
        '    outputs = [ tree ],',
        '    arguments = [ tree.path ],',
        '    progress_message = "Generating cc files into \'%s\'" % tree.path,',
        '    executable = ctx.executable._tool,',
        '  )',
        '',
        '  return [ DefaultInfo(files = depset([ tree ])) ]',
        '',
        'genccs = rule(',
        '  implementation = _impl,',
        '  attrs = {',
        '    "_tool": attr.label(',
        '      executable = True,',
        '      cfg = "host",',
        '      allow_files = True,',
        '      default = Label("//:genccs"),',
        '    )',
        '  }',
        ')',
    ])
    self.ScratchFile('genccs.cpp', [
        '#include <fstream>',
        '#include <Windows.h>',
        'using namespace std;',
        '',
        'int main (int argc, char *argv[]) {',
        '  CreateDirectory(argv[1], NULL);',
        '  ofstream myfile;',
        '  myfile.open(string(argv[1]) + string("/foo.cpp"));',
        '  myfile << "int main() { return 42; }";',
        '  return 0;',
        '}',
    ])
    exit_code, _, stderr = self.RunBazel(['build', '//:main'])
    self.AssertExitCode(exit_code, 0, stderr)


if __name__ == '__main__':
  unittest.main()
# VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# coding: utf-8
# NOTE: this module is Python 2 code (StringIO import, list-returning
# filter()); do not port constructs piecemeal.

import time
import requests
from StringIO import StringIO
from schema.vcd.v1_5.schemas.vcloud import vAppType, vdcType, queryRecordViewType, taskType, vcloudType
from schema.vcd.v1_5.schemas.vcloud.taskType import TaskType
from schema.vcd.v1_5.schemas.vcloud.vAppType import VAppType, NetworkConnectionSectionType
from iptools import ipv4, IpRange
from pyvcloud.helper import CommonUtils
from pyvcloud import _get_logger, Http, Log

# Mapping of vCloud numeric entity status codes to human-readable text.
VCLOUD_STATUS_MAP = {
    -1: "Could not be created",
    0: "Unresolved",
    1: "Resolved",
    2: "Deployed",
    3: "Suspended",
    4: "Powered on",
    5: "Waiting for user input",
    6: "Unknown state",
    7: "Unrecognized state",
    8: "Powered off",
    9: "Inconsistent state",
    10: "Children do not all have the same status",
    11: "Upload initiated, OVF descriptor pending",
    12: "Upload initiated, copying contents",
    13: "Upload initiated , disk contents pending",
    14: "Upload has been quarantined",
    15: "Upload quarantine period has expired"
}


class VAPP(object):
    """Wrapper around a vCloud vApp entity: issues lifecycle operations
    (deploy, power, snapshot, network (dis)connect) against the REST API
    links exposed by the vApp document."""

    def __init__(self, vApp, headers, verify, log=False):
        # vApp: parsed vApp document; headers: auth headers for every call;
        # verify: TLS certificate verification flag passed to requests.
        self.me = vApp
        self.headers = headers
        self.verify = verify
        self.response = None
        self.logger = _get_logger() if log else None

    @property
    def name(self):
        return self.me.get_name()

    def execute(self, operation, http, body=None, targetVM=None):
        """
        Execute an operation against a VM as an Asychronous Task.

        :param operation: (str): The command to execute
        :param http: (str): The http operation.
        :param body: (str, optional): a body for the http request
        :param targetVM: (str, optional): The name of the VM that will be
            the target of the request.
        :return: (TaskType or Bool) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object
            that can be used to monitor the request. \n
            Or False if the request failed, error and debug level messages are logged.
        """
        vApp = targetVM if targetVM else self.me
        # Python 2 filter() returns a list; the operation name must match a
        # link's rel attribute in the entity document.
        link = filter(lambda link: link.get_rel() == operation,
                      vApp.get_Link())
        if not link:
            Log.error(self.logger, "link not found; rel=%s" % operation)
            Log.debug(self.logger,
                      "vApp href=%s, name=%s" % (vApp.get_href(), vApp.get_name()))
            return False
        else:
            if http == "post":
                headers = self.headers
                # Pick the vCloud-specific content type matching the body.
                if body and body.startswith('<DeployVAppParams '):
                    headers['Content-type'] = 'application/vnd.vmware.vcloud.deployVAppParams+xml'
                elif body and body.startswith('<UndeployVAppParams '):
                    headers['Content-type'] = 'application/vnd.vmware.vcloud.undeployVAppParams+xml'
                elif body and body.startswith('<CreateSnapshotParams '):
                    headers['Content-type'] = 'application/vnd.vmware.vcloud.createSnapshotParams+xml'
                self.response = Http.post(link[0].get_href(), data=body,
                                          headers=headers, verify=self.verify,
                                          logger=self.logger)
            elif http == "put":
                self.response = Http.put(link[0].get_href(), data=body,
                                         headers=self.headers,
                                         verify=self.verify,
                                         logger=self.logger)
            else:
                self.response = Http.delete(link[0].get_href(),
                                            headers=self.headers,
                                            verify=self.verify,
                                            logger=self.logger)
            if self.response.status_code == requests.codes.accepted:
                # 202 Accepted — the API returns a Task to monitor.
                return taskType.parseString(self.response.content, True)
            else:
                Log.debug(self.logger,
                          "failed; response status=%d, content=%s" % (self.response.status_code, self.response.text))
                return False

    def deploy(self, powerOn=True):
        """
        Deploy the vapp

        :param powerOn: (bool, optional): Power on the vApp and its
            contained VMs after deployment.
        :return: (bool): True if the user was vApp was successfully deployed, False otherwise.
        """
        powerOnValue = 'true' if powerOn else 'false'
        deployVAppParams = vcloudType.DeployVAppParamsType()
        deployVAppParams.set_powerOn(powerOnValue)
        body = CommonUtils.convertPythonObjToStr(
            deployVAppParams,
            name="DeployVAppParams",
            namespacedef='xmlns="http://www.vmware.com/vcloud/v1.5"')
        return self.execute("deploy", "post", body=body)

    def undeploy(self, action='powerOff'):
        """
        Undeploy the vapp

        :param action: (bool, optional): Power on the vApp and its contained VMs after deployment.

            * The valid values of action are

                - **powerOff** (Power off the VMs. This is the default
                  action if this attribute is missing or empty),
                - **suspend** (Suspend the VMs), shutdown (Shut down the VMs),
                - **force** (Attempt to power off the VMs. Failures in
                  undeploying the VM or associated networks are ignored.
                  All references to the vApp and its VMs are removed from
                  the database),
                - **default** (Use the actions, order, and delay specified
                  in the StartupSection).
        :returns: (bool): True if the user was vApp was successfully deployed, False otherwise.
        """
        undeployVAppParams = vcloudType.UndeployVAppParamsType()
        undeployVAppParams.set_UndeployPowerAction(action)
        body = CommonUtils.convertPythonObjToStr(
            undeployVAppParams,
            name="UndeployVAppParams",
            namespacedef='xmlns="http://www.vmware.com/vcloud/v1.5"')
        return self.execute("undeploy", "post", body=body)

    def reboot(self):
        """
        Reboot the vApp

        :returns: (None)
        """
        self.execute("power:reboot", "post")

    def poweron(self):
        """
        Power on the vApp

        :returns: (None)
        """
        return self.execute("power:powerOn", "post")

    def poweroff(self):
        """
        Power off the vApp

        :returns: (None)
        """
        return self.execute("power:powerOff", "post")

    def shutdown(self):
        """
        Shutdown the vApp

        :returns: (None)
        """
        return self.execute("power:shutdown", "post")

    def suspend(self):
        """
        Suspend the vApp

        :returns: (None)
        """
        self.execute("power:suspend", "post")

    def reset(self):
        """
        Reset the vApp

        :returns: (None)
        """
        self.execute("power:reset", "post")

    def delete(self):
        """
        Delete the vApp

        Note: The vApp must be undeployed and power it off before it is deleted.

        :returns: (None)
        """
        return self.execute("remove", "delete")

    def create_snapshot(self):
        """
        Create a new snapshot of the vApp state.

        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object
            that can be used to monitor the request.
        """
        # Snapshot name embeds a millisecond timestamp for uniqueness.
        snapshot_name = '{}_snapshot_{}'.format(self.name,
                                                int(round(time.time() * 1000)))
        createSnapshotParams = vcloudType.CreateSnapshotParamsType()
        createSnapshotParams.set_name(snapshot_name)
        createSnapshotParams.set_Description(snapshot_name)
        body = CommonUtils.convertPythonObjToStr(
            createSnapshotParams,
            name="CreateSnapshotParams",
            namespacedef='xmlns="http://www.vmware.com/vcloud/v1.5"')
        return self.execute("snapshot:create", "post", body)

    def revert_snapshot(self):
        """
        Revert to a previous vApp snapshot.

        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object
            that can be used to monitor the request.
        """
        return self.execute("snapshot:revertToCurrent", "post")

    def delete_snapshot(self):
        """
        Delete an existing snapshot.

        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object
            that can be used to monitor the request.
        """
        return self.execute("snapshot:removeAll", "post")

    @staticmethod
    def create_networkConfigSection(network_name, network_href, fence_mode,
                                    prev_network_config_section=None):
        """Build (or extend) a NetworkConfigSection describing a vApp
        network backed by the parent network at ``network_href`` with the
        given fence mode."""
        parentNetwork = vcloudType.ReferenceType(href=network_href,
                                                 name=network_name)
        configuration = vcloudType.NetworkConfigurationType()
        configuration.set_ParentNetwork(parentNetwork)
        configuration.set_FenceMode(fence_mode)
        networkConfig = vcloudType.VAppNetworkConfigurationType()
        networkConfig.set_networkName(network_name)
        networkConfig.set_Configuration(configuration)
        info = vcloudType.Msg_Type()
        info.set_valueOf_("Configuration parameters for logical networks")
        networkConfigSection = None
        if prev_network_config_section is None:
            networkConfigSection = vcloudType.NetworkConfigSectionType()
        else:
            networkConfigSection = prev_network_config_section
        networkConfigSection.add_NetworkConfig(networkConfig)
        networkConfigSection.set_Info(vAppType.cimString(valueOf_="Network config"))
        return networkConfigSection

    def connect_vms(self, network_name, connection_index,
                    connections_primary_index=None, ip_allocation_mode='DHCP',
                    mac_address=None, ip_address=None):
        """
        Attach vms to a virtual network. something helpful.

        :param network_name: (str): The network name to connect the VM to.
        :param connection_index: (str): Virtual slot number associated with this NIC. First slot number is 0.
        :param connections_primary_index: (str): Virtual slot number associated with the NIC that should be considered this \n
            virtual machine's primary network connection. Defaults to slot 0.
        :param ip_allocation_mode: (str, optional): IP address allocation mode for this connection.

            * One of:

                - POOL (A static IP address is allocated automatically from a pool of addresses.)
                - DHCP (The IP address is obtained from a DHCP service.)
                - MANUAL (The IP address is assigned manually in the IpAddress element.)
                - NONE (No IP addressing mode specified.)

        :param mac_address: (str): the MAC address associated with the NIC.
        :param ip_address: (str): the IP address assigned to this NIC.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object
            that can be used to monitor the request.
        """
        children = self.me.get_Children()
        if children:
            vms = children.get_Vm()
            for vm in vms:
                new_connection = self._create_networkConnection(
                    network_name, connection_index, ip_allocation_mode,
                    mac_address, ip_address)
                networkConnectionSection = [
                    section for section in vm.get_Section()
                    if isinstance(section, NetworkConnectionSectionType)][0]
                self._modify_networkConnectionSection(
                    networkConnectionSection, new_connection,
                    connections_primary_index)
                output = StringIO()
                networkConnectionSection.export(
                    output, 0,
                    name_='NetworkConnectionSection',
                    namespacedef_='xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmw="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                    pretty_print=True)
                # The exporter emits vmw:Info; the API expects ovf:Info.
                body = output.getvalue().replace("vmw:Info", "ovf:Info")
                self.response = Http.put(
                    vm.get_href() + "/networkConnectionSection/",
                    data=body, headers=self.headers, verify=self.verify,
                    logger=self.logger)
                if self.response.status_code == requests.codes.accepted:
                    # NOTE(review): returns after the first VM whose update
                    # is accepted — remaining VMs are not processed; confirm
                    # this is intentional.
                    return taskType.parseString(self.response.content, True)

    def disconnect_vms(self, network_name=None):
        """
        Disconnect the vm from the vapp network.

        :param network_name: (string): The name of the vApp network. If None, then disconnect from all the networks.
        :return: (bool): True if the user was vApp was successfully deployed, False otherwise.
        """
        children = self.me.get_Children()
        if children:
            vms = children.get_Vm()
            for vm in vms:
                Log.debug(self.logger, "child VM name=%s" % vm.get_name())
                networkConnectionSection = [
                    section for section in vm.get_Section()
                    if isinstance(section, NetworkConnectionSectionType)][0]
                found = -1
                if network_name is None:
                    # Drop every connection on this VM.
                    networkConnectionSection.set_NetworkConnection([])
                    found = 1
                else:
                    for index, networkConnection in enumerate(networkConnectionSection.get_NetworkConnection()):
                        if networkConnection.get_network() == network_name:
                            found = index
                            break
                    if found != -1:
                        networkConnectionSection.NetworkConnection.pop(found)
                if found != -1:
                    output = StringIO()
                    networkConnectionSection.export(
                        output, 0,
                        name_='NetworkConnectionSection',
                        namespacedef_='xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmw="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                        pretty_print=True)
                    # The exporter emits vmw:Info; the API expects ovf:Info.
                    body = output.getvalue().replace("vmw:Info", "ovf:Info")
                    self.response = Http.put(
                        vm.get_href() + "/networkConnectionSection/",
                        data=body, headers=self.headers, verify=self.verify,
                        logger=self.logger)
                    if self.response.status_code == requests.codes.accepted:
                        return taskType.parseString(self.response.content, True)
        # Nothing needed disconnecting: synthesize an already-successful task.
        task = TaskType()
        task.set_status("success")
        task.set_Progress("100")
        return task

    def connect_to_network(self, network_name, network_href, fence_mode='bridged'):
        """
        Connect the vApp to an existing virtual network in the VDC.

        :param network_name: (str): The name of the virtual network.
        :param network_href: (str): A uri that points to the network resource.
        :param fence_mode: (str, optional):
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object
            that can be used to monitor the request.
""" vApp_NetworkConfigSection = [section for section in self.me.get_Section() if section.__class__.__name__ == "NetworkConfigSectionType"][0] link = [link for link in vApp_NetworkConfigSection.get_Link() if link.get_type() == "application/vnd.vmware.vcloud.networkConfigSection+xml"][0] for networkConfig in vApp_NetworkConfigSection.get_NetworkConfig(): if networkConfig.get_networkName() == network_name: task = TaskType() task.set_status("success") task.set_Progress("100") return task networkConfigSection = VAPP.create_networkConfigSection(network_name, network_href, fence_mode, vApp_NetworkConfigSection) output = StringIO() networkConfigSection.export(output, 0, name_ = 'NetworkConfigSection', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"', pretty_print = True) body = output.getvalue().\ replace('Info msgid=""', "ovf:Info").replace("Info", "ovf:Info").replace(":vmw", "").replace("vmw:","")\ .replace("RetainNetovf", "ovf").replace("ovf:InfoAcrossDeployments","RetainNetInfoAcrossDeployments") self.response = Http.put(link.get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) def disconnect_from_networks(self): """ Disconnect the vApp from currently connected virtual networks. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. 
""" networkConfigSection = [section for section in self.me.get_Section() if section.__class__.__name__ == "NetworkConfigSectionType"][0] link = [link for link in networkConfigSection.get_Link() if link.get_type() == "application/vnd.vmware.vcloud.networkConfigSection+xml"][0] networkConfigSection.NetworkConfig[:] = [] output = StringIO() networkConfigSection.export(output, 0, name_ = 'NetworkConfigSection', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"', pretty_print = True) body = output.getvalue().\ replace("vmw:", "").replace('Info xmlns:vmw="http://www.vmware.com/vcloud/v1.5" msgid=""', "ovf:Info").\ replace("/Info", "/ovf:Info") self.response = Http.put(link.get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) def disconnect_from_network(self, network_name): """ Disconnect the vApp from an existing virtual network in the VDC. :param network_name: (str): The name of the virtual network. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. 
""" networkConfigSection = [section for section in self.me.get_Section() if section.__class__.__name__ == "NetworkConfigSectionType"][0] link = [link for link in networkConfigSection.get_Link() if link.get_type() == "application/vnd.vmware.vcloud.networkConfigSection+xml"][0] found = -1 for index, networkConfig in enumerate(networkConfigSection.get_NetworkConfig()): if networkConfig.get_networkName() == network_name: found = index if found != -1: networkConfigSection.NetworkConfig.pop(found) output = StringIO() networkConfigSection.export(output, 0, name_ = 'NetworkConfigSection', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"', pretty_print = True) body = output.getvalue().\ replace("vmw:", "").replace('Info xmlns:vmw="http://www.vmware.com/vcloud/v1.5" msgid=""', "ovf:Info").\ replace("/Info", "/ovf:Info") self.response = Http.put(link.get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) def attach_disk_to_vm(self, vm_name, disk_ref): """ Attach a disk volume to a VM. The volume must have been previously added to the VDC. :param vm_name: (str): The name of the vm that the disk will be attached to. :param disk_ref: (str): The url of a disk resource. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. 
*Note:* A list of disk references for the vdc can be obtained using the VCA get_diskRefs() method """ children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) ==1: body = """ <DiskAttachOrDetachParams xmlns="http://www.vmware.com/vcloud/v1.5"> <Disk type="application/vnd.vmware.vcloud.disk+xml" href="%s" /> </DiskAttachOrDetachParams> """ % disk_ref.href return self.execute("disk:attach", "post", body=body, targetVM=vms[0]) def detach_disk_from_vm(self, vm_name, disk_ref): """ Detach a disk volume from a VM. The volume must have been previously attached to the VM. :param vm_name: (str): The name of the vm that the disk will be attached to. :param disk_ref: (str): The url of a disk resource. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. *Note:* A list of disk references for the vdc can be obtained using the VCA get_diskRefs() method """ children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) ==1: body = """ <DiskAttachOrDetachParams xmlns="http://www.vmware.com/vcloud/v1.5"> <Disk type="application/vnd.vmware.vcloud.disk+xml" href="%s" /> </DiskAttachOrDetachParams> """ % disk_ref.href return self.execute("disk:detach", "post", body=body, targetVM=vms[0]) def vm_media(self, vm_name, media, operation): """ Return a list of details for a media device attached to the VM. :param vm_name: (str): The name of the vm. :param media_name: (str): The name of the attached media. :return: (dict) a dictionary containing media details. 
\n Dictionary keys 'name','type','href' """ children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) ==1: body = """ <MediaInsertOrEjectParams xmlns="http://www.vmware.com/vcloud/v1.5"> <Media type="%s" name="%s" href="%s" /> </MediaInsertOrEjectParams> """ % (media.get('name'), media.get('id'), media.get('href')) return self.execute("media:%sMedia" % operation, "post", body=body, targetVM=vms[0]) def customize_guest_os(self, vm_name, customization_script=None, computer_name=None, admin_password=None, reset_password_required=False): """ Associate a customization script with a guest OS and execute the script. The VMware tools must be installed in the Guest OS. :param vm_name: (str): The name of the vm to be customized. :param customization_script: (str, Optional): The path to a file on the local file system containing the customization script. :param computer_name: (str, Optional): A new value for the the computer name. A default value for the template is used if a value is not set. :param admin_password: (str, Optional): A password value for the admin/root user. A password is autogenerated if a value is not supplied. :param reset_password_required: (bool): Force the user to reset the password on first login. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n if the task cannot be created a debug level log message is generated detailing the reason. 
""" children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) == 1: sections = vms[0].get_Section() customization_section = [section for section in sections if (section.__class__.__name__ == "GuestCustomizationSectionType") ][0] customization_section.set_Enabled(True) customization_section.set_ResetPasswordRequired( reset_password_required) customization_section.set_AdminAutoLogonEnabled(False) customization_section.set_AdminAutoLogonCount(0) if customization_script: customization_section.set_CustomizationScript( customization_script) if computer_name: customization_section.set_ComputerName(computer_name) if admin_password: customization_section.set_AdminPasswordEnabled(True) customization_section.set_AdminPasswordAuto(False) customization_section.set_AdminPassword(admin_password) output = StringIO() customization_section.export(output, 0, name_ = 'GuestCustomizationSection', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"', pretty_print = True) body = output.getvalue().\ replace("vmw:", "").replace('Info xmlns:vmw="http://www.vmware.com/vcloud/v1.5" msgid=""', "ovf:Info").\ replace("/Info", "/ovf:Info") headers = self.headers headers['Content-type'] = 'application/vnd.vmware.vcloud.guestcustomizationsection+xml' self.response = Http.put(customization_section.Link[0].href, data=body, headers=headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) else: Log.debug(self.logger, "failed; response status=%d, content=%s" % (self.response.status_code, self.response.text)) def force_customization(self, vm_name, power_on=True): """ Force the guest OS customization script to be run for a specific vm in the vApp. 
A customization script must have been previously associated with the VM using the pyvcloud customize_guest_os method or using the vCD console The VMware tools must be installed in the Guest OS. :param vm_name: (str): The name of the vm to be customized. :param power_on (bool): Wether to power the vm on after customization or not :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.b\n if the task cannot be created a debug level log message is generated detailing the reason. """ children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) == 1: sections = vms[0].get_Section() links = filter(lambda link: link.rel== "deploy", vms[0].Link) if len(links) == 1: forceCustomizationValue = 'true' deployVAppParams = vcloudType.DeployVAppParamsType() if power_on: deployVAppParams.set_powerOn('true') else: deployVAppParams.set_powerOn('false') deployVAppParams.set_deploymentLeaseSeconds(0) deployVAppParams.set_forceCustomization('true') body = CommonUtils.convertPythonObjToStr(deployVAppParams, name = "DeployVAppParams", namespacedef = 'xmlns="http://www.vmware.com/vcloud/v1.5"') headers = self.headers headers['Content-type'] = 'application/vnd.vmware.vcloud.deployVAppParams+xml' self.response = Http.post(links[0].href, data=body, headers=headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) else: Log.debug(self.logger, "response status=%d, content=%s" % (self.response.status_code, self.response.text)) def get_vms_network_info(self): """ List details of the networks associated with each of the vms in the vApp :return: (list) a list, one entry per vm, each vm entry contains a list, one entry per network, \n each network entry contains a dictionary of properties for the network. 
\n Dictionary keys 'network_name', 'ip', 'mac', 'is_connected', 'is_primary', 'allocation_mode' """ result = [] vms = self._get_vms() for vm in vms: nw_connections = [] sections = vm.get_Section() networkConnectionSection = filter(lambda section: section.__class__.__name__ == "NetworkConnectionSectionType", sections)[0] primary_index = networkConnectionSection.get_PrimaryNetworkConnectionIndex() connections = networkConnectionSection.get_NetworkConnection() for connection in connections: nw_connections.append( {'network_name': connection.get_network(), 'ip': connection.get_IpAddress(), 'mac': connection.get_MACAddress(), 'is_connected': connection.get_IsConnected(), 'is_primary': connection.get_NetworkConnectionIndex() == primary_index, 'allocation_mode': connection.get_IpAddressAllocationMode() }) result.append(nw_connections) return result def customize_on_next_poweron(self): """ Force the guest OS customization script to be run for the first VM in the vApp. A customization script must have been previously associated with the VM using the pyvcloud customize_guest_os method or using the vCD console The VMware tools must be installed in the Guest OS. :return: (bool) True if the request was accepted, False otherwise. If False an error level log message is generated. """ vm = self._get_vms()[0] link = filter(lambda link: link.get_rel() == "customizeAtNextPowerOn", vm.get_Link()) if link: self.response = Http.post(link[0].get_href(), data=None, headers=self.headers, logger=self.logger) if self.response.status_code == requests.codes.no_content: return True Log.error(self.logger, "link not found") return False def get_vms_details(self): """ Return a list the details for all VMs contained in the vApp. :return: (list) a list, one entry per vm containing a (dict) of properties for the VM. 
\n Dictionary keys 'name','status','cpus','memory','memory_mb','os','owner','admin_password','reset_password_required' """ result = [] children = self.me.get_Children() if children: vms = children.get_Vm() for vm in vms: name = vm.get_name() status = VCLOUD_STATUS_MAP[vm.get_status()] owner = self.me.get_Owner().get_User().get_name() sections = vm.get_Section() virtualHardwareSection = filter(lambda section: section.__class__.__name__== "VirtualHardwareSection_Type", sections)[0] items = virtualHardwareSection.get_Item() cpu = filter(lambda item: item.get_Description().get_valueOf_() == "Number of Virtual CPUs", items)[0] cpu_capacity = int(cpu.get_ElementName().get_valueOf_().split(" virtual CPU(s)")[0]) memory = filter(lambda item: item.get_Description().get_valueOf_() == "Memory Size", items)[0] memory_capacity_mb = int(memory.get_ElementName().get_valueOf_().split(" MB of memory")[0]) memory_capacity = memory_capacity_mb / 1024 operatingSystemSection = filter(lambda section: section.__class__.__name__== "OperatingSystemSection_Type", sections)[0] os = operatingSystemSection.get_Description().get_valueOf_() customization_section = filter(lambda section: section.__class__.__name__== "GuestCustomizationSectionType", sections)[0] result.append( {'name': name, 'status': status, 'cpus': cpu_capacity, 'memory': memory_capacity, 'memory_mb': memory_capacity_mb, 'os': os, 'owner': owner, 'admin_password': customization_section.get_AdminPassword(), 'reset_password_required': customization_section.get_ResetPasswordRequired() } ) Log.debug(self.logger, "details of VMs: %s" % result) return result def modify_vm_name(self, vm_index, vm_name): """ Modify the name of a VM in a vApp :param vm_index: (int):The index of the VM in the vApp 1==first VM :param vm_name: (str): The new name of the VM. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. 
\n if the task cannot be created a debug level log message is generated detailing the reason. :raises: Exception: If the named VM cannot be located or another error occured. """ children = self.me.get_Children() if children: assert len(children.get_Vm()) >= vm_index vm = children.get_Vm()[vm_index-1] assert vm href = vm.get_href() vm_name_old = vm.get_name() Log.debug(self.logger, "VM name change (%s) %s -> %s" % (vm_index, vm_name_old, vm_name)) vm.set_name(vm_name) vm.set_Section([]) output = StringIO() vm.export(output, 0, name_ = 'Vm', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmw="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"', pretty_print = True) body = output.getvalue() headers = self.headers headers['Content-type'] = 'application/vnd.vmware.vcloud.vm+xml' self.response = Http.post(href+'/action/reconfigureVm', data=body, headers=headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) else: raise Exception(self.response.status_code) raise Exception('can\'t find vm') def modify_vm_memory(self, vm_name, new_size): """ Modify the virtual Memory allocation for VM. :param vm_name: (str): The name of the vm to be customized. :param new_size: (int): The new memory allocation in MB. :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n if the task cannot be created a debug level log message is generated detailing the reason. :raises: Exception: If the named VM cannot be located or another error occured. 
""" children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) == 1: sections = vm.get_Section() virtualHardwareSection = filter(lambda section: section.__class__.__name__== "VirtualHardwareSection_Type", sections)[0] items = virtualHardwareSection.get_Item() memory = filter(lambda item: item.get_Description().get_valueOf_() == "Memory Size", items)[0] href = memory.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href') en = memory.get_ElementName() en.set_valueOf_('%s MB of memory' % new_size) memory.set_ElementName(en) vq = memory.get_VirtualQuantity() vq.set_valueOf_(new_size) memory.set_VirtualQuantity(vq) weight = memory.get_Weight() weight.set_valueOf_(str(int(new_size)*10)) memory.set_Weight(weight) memory_string = CommonUtils.convertPythonObjToStr(memory, 'Memory') Log.debug(self.logger, "memory: \n%s" % memory_string) output = StringIO() memory.export(output, 0, name_ = 'Item', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"', pretty_print = True) body = output.getvalue().\ replace('Info msgid=""', "ovf:Info").replace("/Info", "/ovf:Info").\ replace("vmw:", "").replace("class:", "rasd:").replace("ResourceType", "rasd:ResourceType") headers = self.headers headers['Content-type'] = 'application/vnd.vmware.vcloud.rasdItem+xml' self.response = Http.put(href, data=body, headers=headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) else: raise Exception(self.response.status_code) raise Exception('can\'t find vm') def modify_vm_cpu(self, vm_name, cpus): """ Modify the virtual CPU allocation for VM. :param vm_name: (str): The name of the vm to be customized. :param cpus: (int): The number of virtual CPUs allocated to the VM. 
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n if the task cannot be created a debug level log message is generated detailing the reason. :raises: Exception: If the named VM cannot be located or another error occured. """ children = self.me.get_Children() if children: vms = [vm for vm in children.get_Vm() if vm.name == vm_name] if len(vms) == 1: sections = vm.get_Section() virtualHardwareSection = filter(lambda section: section.__class__.__name__== "VirtualHardwareSection_Type", sections)[0] items = virtualHardwareSection.get_Item() cpu = filter(lambda item: (item.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href') != None and item.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href').endswith('/virtualHardwareSection/cpu')), items)[0] href = cpu.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href') en = cpu.get_ElementName() en.set_valueOf_('%s virtual CPU(s)' % cpus) cpu.set_ElementName(en) vq = cpu.get_VirtualQuantity() vq.set_valueOf_(cpus) cpu.set_VirtualQuantity(vq) cpu_string = CommonUtils.convertPythonObjToStr(cpu, 'CPU') output = StringIO() cpu.export(output, 0, name_ = 'Item', namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"', pretty_print = True) body = output.getvalue().\ replace('Info msgid=""', "ovf:Info").replace("/Info", "/ovf:Info").\ replace("vmw:", "").replace("class:", "rasd:").replace("ResourceType", "rasd:ResourceType") headers = self.headers headers['Content-type'] = 'application/vnd.vmware.vcloud.rasdItem+xml' self.response = Http.put(href, data=body, headers=headers, verify=self.verify, logger=self.logger) if self.response.status_code == requests.codes.accepted: return taskType.parseString(self.response.content, True) else: raise 
Exception(self.response.status_code) raise Exception('can\'t find vm') def _get_vms(self): children = self.me.get_Children() if children: return children.get_Vm() else: return [] def _modify_networkConnectionSection(self, section, new_connection, primary_index=None): #Need to add same interface more than once for a VM , so commenting out below lines # for networkConnection in section.get_NetworkConnection(): # if (networkConnection.get_network().lower() == # new_connection.get_network().lower()): # return (False, # "VApp {0} is already connected to org vdc network {1}" # .format(self.name, networkConnection.get_network())) section.add_NetworkConnection(new_connection) if section.get_Info() is None: info = vcloudType.Msg_Type() info.set_valueOf_("Network connection") section.set_Info(info) if primary_index is not None: section.set_PrimaryNetworkConnectionIndex(primary_index) def _create_networkConnection(self, network_name, index, ip_allocation_mode, mac_address=None, ip_address=None): networkConnection = vcloudType.NetworkConnectionType() networkConnection.set_network(network_name) networkConnection.set_NetworkConnectionIndex(index) networkConnection.set_IpAddressAllocationMode(ip_allocation_mode) networkConnection.set_IsConnected(True) if ip_address and ip_allocation_mode == 'MANUAL': networkConnection.set_IpAddress(ip_address) if mac_address: networkConnection.set_MACAddress(mac_address) return networkConnection
# Licensed under a 3-clause BSD style license - see LICENSE.rst import operator __all__ = ['BST'] class MaxValue: ''' Represents an infinite value for purposes of tuple comparison. ''' def __gt__(self, other): return True def __ge__(self, other): return True def __lt__(self, other): return False def __le__(self, other): return False def __repr__(self): return "MAX" __str__ = __repr__ class MinValue: ''' The opposite of MaxValue, i.e. a representation of negative infinity. ''' def __lt__(self, other): return True def __le__(self, other): return True def __gt__(self, other): return False def __ge__(self, other): return False def __repr__(self): return "MIN" __str__ = __repr__ class Epsilon: ''' Represents the "next largest" version of a given value, so that for all valid comparisons we have x < y < Epsilon(y) < z whenever x < y < z and x, z are not Epsilon objects. Parameters ---------- val : object Original value ''' __slots__ = ('val',) def __init__(self, val): self.val = val def __lt__(self, other): if self.val == other: return False return self.val < other def __gt__(self, other): if self.val == other: return True return self.val > other def __eq__(self, other): return False def __repr__(self): return repr(self.val) + " + epsilon" class Node: ''' An element in a binary search tree, containing a key, data, and references to children nodes and a parent node. Parameters ---------- key : tuple Node key data : list or int Node data ''' __lt__ = lambda x, y: x.key < y.key __le__ = lambda x, y: x.key <= y.key __eq__ = lambda x, y: x.key == y.key __ge__ = lambda x, y: x.key >= y.key __gt__ = lambda x, y: x.key > y.key __ne__ = lambda x, y: x.key != y.key __slots__ = ('key', 'data', 'left', 'right') # each node has a key and data list def __init__(self, key, data): self.key = key self.data = data if isinstance(data, list) else [data] self.left = None self.right = None def replace(self, child, new_child): ''' Replace this node's child with a new child. 
''' if self.left is not None and self.left == child: self.left = new_child elif self.right is not None and self.right == child: self.right = new_child else: raise ValueError("Cannot call replace() on non-child") def remove(self, child): ''' Remove the given child. ''' self.replace(child, None) def set(self, other): ''' Copy the given node. ''' self.key = other.key self.data = other.data[:] def __str__(self): return str((self.key, self.data)) def __repr__(self): return str(self) class BST: ''' A basic binary search tree in pure Python, used as an engine for indexing. Parameters ---------- data : Table Sorted columns of the original table row_index : Column object Row numbers corresponding to data columns unique : bool (defaults to False) Whether the values of the index must be unique ''' NodeClass = Node def __init__(self, data, row_index, unique=False): self.root = None self.size = 0 self.unique = unique for key, row in zip(data, row_index): self.add(tuple(key), row) def add(self, key, data=None): ''' Add a key, data pair. ''' if data is None: data = key self.size += 1 node = self.NodeClass(key, data) curr_node = self.root if curr_node is None: self.root = node return while True: if node < curr_node: if curr_node.left is None: curr_node.left = node break curr_node = curr_node.left elif node > curr_node: if curr_node.right is None: curr_node.right = node break curr_node = curr_node.right elif self.unique: raise ValueError("Cannot insert non-unique value") else: # add data to node curr_node.data.extend(node.data) curr_node.data = sorted(curr_node.data) return def find(self, key): ''' Return all data values corresponding to a given key. Parameters ---------- key : tuple Input key Returns ------- data_vals : list List of rows corresponding to the input key ''' node, parent = self.find_node(key) return node.data if node is not None else [] def find_node(self, key): ''' Find the node associated with the given key. 
        '''
        if self.root is None:
            return (None, None)
        return self._find_recursive(key, self.root, None)

    def shift_left(self, row):
        '''
        Decrement all rows larger than the given row.
        '''
        # Rows are stored in each node's data list; shift every row number
        # above `row` down by one (used after a row deletion).
        for node in self.traverse():
            node.data = [x - 1 if x > row else x for x in node.data]

    def shift_right(self, row):
        '''
        Increment all rows greater than or equal to the given row.
        '''
        # Inverse of shift_left: make room for an insertion at `row`.
        for node in self.traverse():
            node.data = [x + 1 if x >= row else x for x in node.data]

    def _find_recursive(self, key, node, parent):
        # Standard BST descent; returns (node, parent) or (None, None)
        # when the key is absent or not comparable to stored keys.
        try:
            if key == node.key:
                return (node, parent)
            elif key > node.key:
                if node.right is None:
                    return (None, None)
                return self._find_recursive(key, node.right, node)
            else:
                if node.left is None:
                    return (None, None)
                return self._find_recursive(key, node.left, node)
        except TypeError:  # wrong key type
            return (None, None)

    def traverse(self, order='inorder'):
        '''
        Return nodes of the BST in the given order.

        Parameters
        ----------
        order : str
            The order in which to recursively search the BST. Possible
            values are:
            "preorder": current node, left subtree, right subtree
            "inorder": left subtree, current node, right subtree
            "postorder": left subtree, right subtree, current node
        '''
        if order == 'preorder':
            return self._preorder(self.root, [])
        elif order == 'inorder':
            return self._inorder(self.root, [])
        elif order == 'postorder':
            return self._postorder(self.root, [])
        raise ValueError(f"Invalid traversal method: \"{order}\"")

    def items(self):
        '''
        Return BST items in order as (key, data) pairs.
        '''
        return [(x.key, x.data) for x in self.traverse()]

    def sort(self):
        '''
        Make row order align with key order.
        '''
        # Renumber rows consecutively in key order: an in-order walk visits
        # keys sorted, so assign each node the next `len(node.data)` rows.
        i = 0
        for node in self.traverse():
            num_rows = len(node.data)
            node.data = [x for x in range(i, i + num_rows)]
            i += num_rows

    def sorted_data(self):
        '''
        Return BST rows sorted by key values.
        '''
        return [x for node in self.traverse() for x in node.data]

    def _preorder(self, node, lst):
        # Accumulate nodes into `lst` (node, then left, then right).
        if node is None:
            return lst
        lst.append(node)
        self._preorder(node.left, lst)
        self._preorder(node.right, lst)
        return lst

    def _inorder(self, node, lst):
        # Accumulate nodes into `lst` (left, then node, then right).
        if node is None:
            return lst
        self._inorder(node.left, lst)
        lst.append(node)
        self._inorder(node.right, lst)
        return lst

    def _postorder(self, node, lst):
        # Accumulate nodes into `lst` (left, then right, then node).
        if node is None:
            return lst
        self._postorder(node.left, lst)
        self._postorder(node.right, lst)
        lst.append(node)
        return lst

    def _substitute(self, node, parent, new_node):
        # Splice `new_node` into the position `node` currently occupies,
        # either as the root or as the appropriate child of `parent`.
        if node is self.root:
            self.root = new_node
        else:
            parent.replace(node, new_node)

    def remove(self, key, data=None):
        '''
        Remove data corresponding to the given key.

        Parameters
        ----------
        key : tuple
            The key to remove
        data : int or None
            If None, remove the node corresponding to the given key.
            If not None, remove only the given data value from the node.

        Returns
        -------
        successful : bool
            True if removal was successful, false otherwise
        '''
        node, parent = self.find_node(key)
        if node is None:
            return False
        if data is not None:
            if data not in node.data:
                raise ValueError("Data does not belong to correct node")
            elif len(node.data) > 1:
                # More than one row shares this key: drop just the row and
                # keep the node (falls through to full node removal when
                # this was the last row).
                node.data.remove(data)
                return True
        if node.left is None and node.right is None:
            # Leaf: simply unlink.
            self._substitute(node, parent, None)
        elif node.left is None and node.right is not None:
            self._substitute(node, parent, node.right)
        elif node.right is None and node.left is not None:
            self._substitute(node, parent, node.left)
        else:
            # Two children: find largest element of left subtree (the
            # in-order predecessor), unlink it, and copy its contents into
            # the node being removed via node.set().
            curr_node = node.left
            parent = node
            while curr_node.right is not None:
                parent = curr_node
                curr_node = curr_node.right
            self._substitute(curr_node, parent, curr_node.left)
            node.set(curr_node)
        self.size -= 1
        return True

    def is_valid(self):
        '''
        Returns whether this is a valid BST.
        '''
        return self._is_valid(self.root)

    def _is_valid(self, node):
        # Recursively check the BST ordering invariant. NOTE(review): this
        # only compares each node with its immediate children, relying on
        # node comparison operators (presumably key-based — confirm on the
        # node class).
        if node is None:
            return True
        return (node.left is None or node.left <= node) and \
            (node.right is None or node.right >= node) and \
            self._is_valid(node.left) and self._is_valid(node.right)

    def range(self, lower, upper, bounds=(True, True)):
        '''
        Return all nodes with keys in the given range.

        Parameters
        ----------
        lower : tuple
            Lower bound
        upper : tuple
            Upper bound
        bounds : tuple (x, y) of bools
            Indicates whether the search should be inclusive or
            exclusive with respect to the endpoints. The first
            argument x corresponds to an inclusive lower bound,
            and the second argument y to an inclusive upper bound.
        '''
        nodes = self.range_nodes(lower, upper, bounds)
        return [x for node in nodes for x in node.data]

    def range_nodes(self, lower, upper, bounds=(True, True)):
        '''
        Return nodes in the given range.
        '''
        if self.root is None:
            return []
        # op1 is <= or <, op2 is >= or >
        op1 = operator.le if bounds[0] else operator.lt
        op2 = operator.ge if bounds[1] else operator.gt
        return self._range(lower, upper, op1, op2, self.root, [])

    def same_prefix(self, val):
        '''
        Assuming the given value has smaller length than keys, return
        nodes whose keys have this value as a prefix.
        '''
        if self.root is None:
            return []
        nodes = self._same_prefix(val, self.root, [])
        return [x for node in nodes for x in node.data]

    def _range(self, lower, upper, op1, op2, node, lst):
        # Collect in-range nodes, pruning subtrees that cannot contain
        # keys within [lower, upper].
        if op1(lower, node.key) and op2(upper, node.key):
            lst.append(node)
        if upper > node.key and node.right is not None:
            self._range(lower, upper, op1, op2, node.right, lst)
        if lower < node.key and node.left is not None:
            self._range(lower, upper, op1, op2, node.left, lst)
        return lst

    def _same_prefix(self, val, node, lst):
        # Compare only the first len(val) elements of each key; descend on
        # whichever side(s) might still contain matching prefixes.
        prefix = node.key[:len(val)]
        if prefix == val:
            lst.append(node)
        if prefix <= val and node.right is not None:
            self._same_prefix(val, node.right, lst)
        if prefix >= val and node.left is not None:
            self._same_prefix(val, node.left, lst)
        return lst

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def _print(self, node, level):
        # Render `node` and its subtree as a tab-indented multi-line string.
        line = '\t' * level + str(node) + '\n'
        if node.left is not None:
            line += self._print(node.left, level + 1)
        if node.right is not None:
            line += self._print(node.right, level + 1)
        return line

    @property
    def height(self):
        '''
        Return the BST height.
        '''
        return self._height(self.root)

    def _height(self, node):
        # Height of an empty subtree is -1, so a single node has height 0.
        if node is None:
            return -1
        return max(self._height(node.left),
                   self._height(node.right)) + 1

    def replace_rows(self, row_map):
        '''
        Replace all rows with the values they map to in the
        given dictionary. Any rows not present as keys in
        the dictionary will have their nodes deleted.

        Parameters
        ----------
        row_map : dict
            Mapping of row numbers to new row numbers
        '''
        # In-place slice assignment so each node keeps its own data list;
        # unmapped rows are silently dropped.
        for key, data in self.items():
            data[:] = [row_map[x] for x in data if x in row_map]
# Copyright 2007 Divmod, Inc.
# See LICENSE file for details

"""
Tests for L{xmantissa.webnav}.
"""

# NOTE: this is a Python 2 era module (xrange, trial's failIf, Nevow);
# idioms are kept as-is for compatibility with the surrounding codebase.

from twisted.trial import unittest

from epsilon.structlike import record

from axiom.store import Store
from axiom.dependency import installOn

from nevow.url import URL
from nevow import tags, context
from nevow.testutil import FakeRequest

from xmantissa import webnav
from xmantissa.webapp import PrivateApplication


class FakeNavigator(record('tabs')):
    """
    Minimal INavigableElement-alike that simply returns the tabs it was
    constructed with.
    """
    def getTabs(self):
        return self.tabs


class NavConfigTests(unittest.TestCase):
    """
    Tests for free functions in L{xmantissa.webnav}.
    """
    def test_tabMerge(self):
        """
        L{webnav.getTabs} should combine tabs from the L{INavigableElement}
        providers passed to it into a single structure.  It should preserve
        the attributes of all of the tabs and order them and their children
        by priority.
        """
        # Two navigators contribute a 'Hello' tab; the merged result should
        # take identity fields from the authoritative one (storeID 4) and
        # pool all children, sorted by descending priority.
        nav = webnav.getTabs([
            FakeNavigator([webnav.Tab('Hello', 1, 0.5,
                                      [webnav.Tab('Super', 2, 1.0, (),
                                                  False, '/Super/2'),
                                       webnav.Tab('Mega', 3, 0.5, (),
                                                  False, '/Mega/3')],
                                      False, '/Hello/1')]),
            FakeNavigator([webnav.Tab('Hello', 4, 1.,
                                      [webnav.Tab('Ultra', 5, 0.75, (),
                                                  False, '/Ultra/5'),
                                       webnav.Tab('Hyper', 6, 0.25, (),
                                                  False, '/Hyper/6')],
                                      True, '/Hello/4'),
                           webnav.Tab('Goodbye', 7, 0.9, (),
                                      True, '/Goodbye/7')])])

        hello, goodbye = nav

        self.assertEqual(hello.name, 'Hello')
        self.assertEqual(hello.storeID, 4)
        self.assertEqual(hello.priority, 1.0)
        self.assertEqual(hello.authoritative, True)
        self.assertEqual(hello.linkURL, '/Hello/4')

        # Children from both providers, ordered by priority:
        # 1.0, 0.75, 0.5, 0.25.
        super, ultra, mega, hyper = hello.children

        self.assertEqual(super.name, 'Super')
        self.assertEqual(super.storeID, 2)
        self.assertEqual(super.priority, 1.0)
        self.assertEqual(super.authoritative, False)
        self.assertEqual(super.linkURL, '/Super/2')

        self.assertEqual(ultra.name, 'Ultra')
        self.assertEqual(ultra.storeID, 5)
        self.assertEqual(ultra.priority, 0.75)
        self.assertEqual(ultra.authoritative, False)
        self.assertEqual(ultra.linkURL, '/Ultra/5')

        self.assertEqual(mega.name, 'Mega')
        self.assertEqual(mega.storeID, 3)
        self.assertEqual(mega.priority, 0.5)
        self.assertEqual(mega.authoritative, False)
        self.assertEqual(mega.linkURL, '/Mega/3')

        self.assertEqual(hyper.name, 'Hyper')
        self.assertEqual(hyper.storeID, 6)
        self.assertEqual(hyper.priority, 0.25)
        self.assertEqual(hyper.authoritative, False)
        self.assertEqual(hyper.linkURL, '/Hyper/6')

        self.assertEqual(goodbye.name, 'Goodbye')
        self.assertEqual(goodbye.storeID, 7)
        self.assertEqual(goodbye.priority, 0.9)
        self.assertEqual(goodbye.authoritative, True)
        self.assertEqual(goodbye.linkURL, '/Goodbye/7')

    def test_setTabURLs(self):
        """
        Check that L{webnav.setTabURLs} correctly sets the C{linkURL}
        attribute of L{webnav.Tab} instances to the result of passing
        tab.storeID to L{xmantissa.ixmantissa.IWebTranslator.linkTo}
        if C{linkURL} is not set, and that it leaves it alone if it is
        """
        s = Store()
        privapp = PrivateApplication(store=s)
        installOn(privapp, s)
        tabs = [webnav.Tab('PrivateApplication', privapp.storeID, 0),
                webnav.Tab('Something Else', None, 0, linkURL='/foo/bar')]
        webnav.setTabURLs(tabs, privapp)
        # First tab had no linkURL -> derived from the translator; second
        # tab's explicit linkURL is preserved untouched.
        self.assertEqual(tabs[0].linkURL, privapp.linkTo(privapp.storeID))
        self.assertEqual(tabs[1].linkURL, '/foo/bar')

    def test_getSelectedTabExactMatch(self):
        """
        Check that L{webnav.getSelectedTab} returns the tab whose
        C{linkURL} attribute exactly matches the path of the
        L{nevow.url.URL} it is passed
        """
        tabs = list(webnav.Tab(str(i), None, 0, linkURL='/' + str(i))
                    for i in xrange(5))
        for (i, tab) in enumerate(tabs):
            selected = webnav.getSelectedTab(
                tabs, URL.fromString(tab.linkURL))
            self.assertIdentical(selected, tab)
        # A path matching no tab selects nothing.
        selected = webnav.getSelectedTab(tabs, URL.fromString('/XYZ'))
        self.failIf(selected)

    def test_getSelectedTabPrefixMatch(self):
        """
        Check that L{webnav.getSelectedTab} returns the tab whose
        C{linkURL} attribute contains the longest prefix of path segments
        that appears at the beginning of the L{nevow.url.URL} it is passed
        (if there is not an exact match)
        """
        tabs = [webnav.Tab('thing1', None, 0, linkURL='/a/b/c/d'),
                webnav.Tab('thing2', None, 0, linkURL='/a/b/c')]

        def assertSelected(tab):
            selected = webnav.getSelectedTab(
                tabs, URL.fromString('/a/b/c/d/e'))
            self.assertIdentical(selected, tab)

        # Longest-prefix wins regardless of list order.
        assertSelected(tabs[0])
        tabs.reverse()
        assertSelected(tabs[1])

        # A relative (non-'/'-prefixed) linkURL never matches.
        tabs.append(webnav.Tab('thing3', None, 0, linkURL='a/b/c/e/e'))
        assertSelected(tabs[1])

        # An exact match beats any prefix match.
        t = webnav.Tab('thing4', None, 0, linkURL='/a/b/c/d/e')
        tabs.append(t)
        assertSelected(t)


class FakeTranslator(object):
    """
    A dumb translator which follows a very simple translation rule and can
    only translate in one direction.
    """
    def linkTo(self, obj):
        """
        Return a fake link based on the given object.
        """
        return '/link/' + str(obj)


class RendererTests(unittest.TestCase):
    """
    Tests for certain free functions in L{xmantissa.webnav} which render
    different things.
    """
    def test_startMenuSetsTabURLs(self):
        """
        L{Tabs<Tab>} which have C{None} for a C{linkURL} attribute should
        have a value set for that attribute based on the L{IWebTranslator}
        passed to L{startMenu}.
        """
        tab = webnav.Tab('alpha', 123, 0)
        webnav.startMenu(FakeTranslator(), [tab], tags.span())
        self.assertEqual(tab.linkURL, '/link/123')

    def test_startMenuRenders(self):
        """
        Test that the L{startMenu} renderer creates a tag for each tab,
        filling its I{href}, I{name}, and I{kids} slots.
        """
        tabs = [
            webnav.Tab('alpha', 123, 0),
            webnav.Tab('beta', 234, 0)]
        node = tags.span[tags.div(pattern='tab')]
        tag = webnav.startMenu(FakeTranslator(), tabs, node)

        self.assertEqual(tag.tagName, 'span')
        navTags = list(tag.slotData['tabs'])
        self.assertEqual(len(navTags), 2)
        alpha, beta = navTags
        self.assertEqual(alpha.slotData['name'], 'alpha')
        self.assertEqual(alpha.slotData['href'], '/link/123')
        self.assertEqual(alpha.slotData['kids'], '')
        self.assertEqual(beta.slotData['name'], 'beta')
        self.assertEqual(beta.slotData['href'], '/link/234')
        self.assertEqual(beta.slotData['kids'], '')

    def test_settingsLink(self):
        """
        L{settingsLink} should add a link to the settings item supplied as
        a child of the tag supplied.
        """
        # `self` doubles as the settings item: settingsLink only needs an
        # object with a storeID attribute.
        self.storeID = 123
        node = tags.span()
        tag = webnav.settingsLink(FakeTranslator(), self, node)
        self.assertEqual(tag.tagName, 'span')
        self.assertEqual(tag.children, ['/link/123'])

    def _renderAppNav(self, tabs, template=None):
        """
        Render application navigation and return the resulting tag.

        @param template: a Tag containing a template for navigation.
        """
        if template is None:
            template = tags.span[
                tags.div(pattern='app-tab'),
                tags.div(pattern='tab-contents')]
        ctx = context.WebContext(tag=template)
        request = FakeRequest()
        ctx.remember(request)
        return webnav.applicationNavigation(ctx, FakeTranslator(), tabs)

    def test_applicationNavigation(self):
        """
        Test that the L{applicationNavigation} renderer creates a tag for
        each tab, fillings I{name} and I{tab-contents} slots.
        """
        tag = self._renderAppNav([
            webnav.Tab('alpha', 123, 0),
            webnav.Tab('beta', 234, 0)])

        self.assertEqual(tag.tagName, 'span')
        navTags = list(tag.slotData['tabs'])
        self.assertEqual(len(navTags), 2)
        alpha, beta = navTags
        self.assertEqual(alpha.slotData['name'], 'alpha')
        alphaContents = alpha.slotData['tab-contents']
        self.assertEqual(alphaContents.slotData['href'], '/link/123')
        self.assertEqual(beta.slotData['name'], 'beta')
        betaContents = beta.slotData['tab-contents']
        self.assertEqual(betaContents.slotData['href'], '/link/234')

    def test_applicationNavigationChildren(self):
        """
        The L{applicationNavigation} renderer should fill the 'subtabs'
        slot with copies of the 'subtab' pattern for each tab, if that
        pattern is present.

        (This is only tested to one level of depth because we currently
        only support one level of depth.)
        """
        tag = self._renderAppNav(
            [webnav.Tab('alpha', 123, 0),
             webnav.Tab('beta', 234, 0,
                        children=[
                            webnav.Tab('gamma', 345, 0),
                            webnav.Tab('delta', 456, 0)])],
            tags.span[tags.div(pattern='app-tab'),
                      tags.div(pattern='tab-contents'),
                      tags.div(pattern='subtab'),
                      tags.div(pattern='subtab-contents',
                               class_='subtab-contents-class')])

        navTags = list(tag.slotData['tabs'])
        self.assertEqual(len(navTags), 2)
        alpha, beta = navTags
        self.assertEqual(alpha.slotData['subtabs'], [])
        self.assertEqual(len(beta.slotData['subtabs']), 2)
        subtab1 = beta.slotData['subtabs'][0]
        self.assertEqual(subtab1.slotData['name'], 'gamma')
        self.assertEqual(subtab1.slotData['href'], '/link/345')
        self.assertEqual(subtab1.slotData['tab-contents'].attributes['class'],
                         'subtab-contents-class')
        subtab2 = beta.slotData['subtabs'][1]
        self.assertEqual(subtab2.slotData['name'], 'delta')
        self.assertEqual(subtab2.slotData['href'], '/link/456')
        self.assertEqual(subtab2.slotData['tab-contents'].attributes['class'],
                         'subtab-contents-class')

    def test_applicationNavigationMissingSubtabsPattern(self):
        """
        The L{applicationNavigation} renderer should fill the 'subtabs'
        slot with the empty list if the 'subtabs' pattern is not found.
        This is to ensure that it remains compatible with older customized
        'shell' templates.
        """
        tag = self._renderAppNav([
            webnav.Tab("alpha", 123, 0,
                       children=[webnav.Tab("beta", 234, 0)])])
        navTags = list(tag.slotData['tabs'])
        self.assertEqual(navTags[0].slotData['subtabs'], [])
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conformance tests for retry. Verifies correct behavior around retryable errors, idempotency and preconditions.""" import functools import logging import os import subprocess import tempfile import time import uuid import pytest import requests import urllib from google.auth.credentials import AnonymousCredentials from google.cloud import storage from google.cloud.storage.hmac_key import HMACKeyMetadata from . import _read_local_json _CONFORMANCE_TESTS = _read_local_json("retry_strategy_test_data.json")["retryTests"] """Environment variable or default host for Storage testbench emulator.""" _HOST = os.environ.get("STORAGE_EMULATOR_HOST", "http://localhost:9000") _PORT = urllib.parse.urlsplit(_HOST).port """The storage testbench docker image info and commands.""" _DEFAULT_IMAGE_NAME = "gcr.io/cloud-devrel-public-resources/storage-testbench" _DEFAULT_IMAGE_TAG = "latest" _DOCKER_IMAGE = "{}:{}".format(_DEFAULT_IMAGE_NAME, _DEFAULT_IMAGE_TAG) _PULL_CMD = ["docker", "pull", _DOCKER_IMAGE] _RUN_CMD = ["docker", "run", "--rm", "-d", "-p", "{}:9000".format(_PORT), _DOCKER_IMAGE] _CONF_TEST_PROJECT_ID = "my-project-id" _CONF_TEST_SERVICE_ACCOUNT_EMAIL = ( "my-service-account@my-project-id.iam.gserviceaccount.com" ) _CONF_TEST_PUBSUB_TOPIC_NAME = "my-topic-name" _STRING_CONTENT = "hello world" _BYTE_CONTENT = b"12345678" _RESUMABLE_UPLOAD_CHUNK_SIZE = 2 * 1024 * 1024 
########################################################################################################################################
### Library methods for mapping ########################################################################################################
########################################################################################################################################

# Every wrapper below shares the same signature: (client, _preconditions, **resources).
#   client         - storage.Client pointed at the testbench emulator
#   _preconditions - bool; when True the call is made with conditional request
#                    parameters (generation/metageneration matches) where the
#                    library supports them
#   resources      - fixtures provisioned by the harness: "bucket", "object",
#                    "file_data" (a (blob, payload-string) pair), "notification",
#                    "hmac_key"
# Wrappers deliberately re-derive fresh client-bound objects from the fixture
# names so that each invocation issues real API calls against the testbench.


def bucket_get_blob(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    bucket = client.bucket(bucket.name)
    bucket.get_blob(object.name)


def blob_exists(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    blob.exists()


def blob_download_as_bytes(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    # download the file and assert data integrity
    blob = client.bucket(bucket.name).blob(file.name)
    stored_contents = blob.download_as_bytes()
    assert stored_contents == data.encode("utf-8")


def blob_download_as_bytes_w_range(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(file.name)
    # Ranged download: `end` is inclusive, hence end_byte - 1.
    start_byte = 0
    end_byte = 1000000
    stored_contents = blob.download_as_bytes(start=start_byte, end=end_byte - 1)
    assert stored_contents == data.encode("utf-8")[start_byte:end_byte]


def blob_download_as_text(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(file.name)
    stored_contents = blob.download_as_text()
    assert stored_contents == data


def blob_download_to_filename(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(file.name)
    with tempfile.NamedTemporaryFile() as temp_f:
        blob.download_to_filename(temp_f.name)
        with open(temp_f.name, "r") as file_obj:
            stored_contents = file_obj.read()
        assert stored_contents == data


def blob_download_to_filename_chunked(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    # Oversized chunk_size forces the chunked-download code path.
    blob = client.bucket(bucket.name).blob(file.name, chunk_size=40 * 1024 * 1024)
    with tempfile.NamedTemporaryFile() as temp_f:
        blob.download_to_filename(temp_f.name)
        with open(temp_f.name, "r") as file_obj:
            stored_contents = file_obj.read()
        assert stored_contents == data


def client_download_blob_to_file(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(file.name)
    with tempfile.NamedTemporaryFile() as temp_f:
        with open(temp_f.name, "wb") as file_obj:
            client.download_blob_to_file(blob, file_obj)
        with open(temp_f.name, "r") as to_read:
            stored_contents = to_read.read()
        assert stored_contents == data


def blobreader_read(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(file.name)
    with blob.open(mode="r") as reader:
        stored_contents = reader.read()
    assert stored_contents == data


def client_list_blobs(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    blobs = client.list_blobs(bucket.name)
    # Iterate to force the paged API calls to actually happen.
    for b in blobs:
        pass


def bucket_list_blobs(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    blobs = client.bucket(bucket.name).list_blobs()
    for b in blobs:
        pass


def bucket_delete(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    # force=True also deletes contained objects (hence its presence in the
    # storage.objects.list mapping below).
    bucket.delete(force=True)


def bucket_reload(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.reload()


def client_get_bucket(client, _preconditions, **resources):
    client.get_bucket(resources.get("bucket").name)


def client_lookup_bucket(client, _preconditions, **resources):
    client.lookup_bucket(resources.get("bucket").name)


def bucket_exists(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.exists()


def client_create_bucket(client, _preconditions, **_):
    bucket = client.bucket(uuid.uuid4().hex)
    client.create_bucket(bucket)


def bucket_create(client, _preconditions, **_):
    bucket = client.bucket(uuid.uuid4().hex)
    bucket.create()


def client_list_buckets(client, _preconditions, **_):
    buckets = client.list_buckets()
    for b in buckets:
        pass


def bucket_get_iam_policy(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.get_iam_policy()


def bucket_test_iam_permissions(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    permissions = ["storage.buckets.get", "storage.buckets.create"]
    bucket.test_iam_permissions(permissions)


def bucket_lock_retention_policy(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    # A retention period must exist (and be patched in) before it can be locked.
    bucket.retention_period = 60
    bucket.patch()
    bucket.lock_retention_policy()


def client_get_service_account_email(client, _preconditions, **_):
    client.get_service_account_email()


def notification_create(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    notification = bucket.notification(topic_name=_CONF_TEST_PUBSUB_TOPIC_NAME)
    notification.create()


def bucket_list_notifications(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    notifications = client.bucket(bucket.name).list_notifications()
    for n in notifications:
        pass


def bucket_get_notification(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    notification = resources.get("notification")
    client.bucket(bucket.name).get_notification(notification.notification_id)


def notification_reload(client, _preconditions, **resources):
    notification = client.bucket(resources.get("bucket").name).notification(
        notification_id=resources.get("notification").notification_id
    )
    notification.reload()


def notification_exists(client, _preconditions, **resources):
    notification = client.bucket(resources.get("bucket").name).notification(
        notification_id=resources.get("notification").notification_id
    )
    notification.exists()


def notification_delete(client, _preconditions, **resources):
    notification = client.bucket(resources.get("bucket").name).notification(
        notification_id=resources.get("notification").notification_id
    )
    notification.delete()


def client_list_hmac_keys(client, _preconditions, **_):
    hmac_keys = client.list_hmac_keys()
    for k in hmac_keys:
        pass


def client_get_hmac_key_metadata(client, _preconditions, **resources):
    access_id = resources.get("hmac_key").access_id
    client.get_hmac_key_metadata(access_id=access_id)


def hmac_key_exists(client, _preconditions, **resources):
    access_id = resources.get("hmac_key").access_id
    hmac_key = HMACKeyMetadata(client, access_id=access_id)
    hmac_key.exists()


def hmac_key_reload(client, _preconditions, **resources):
    access_id = resources.get("hmac_key").access_id
    hmac_key = HMACKeyMetadata(client, access_id=access_id)
    hmac_key.reload()


def hmac_key_delete(client, _preconditions, **resources):
    access_id = resources.get("hmac_key").access_id
    hmac_key = HMACKeyMetadata(client, access_id=access_id)
    # Keys must be INACTIVE before the API allows deletion.
    hmac_key.state = "INACTIVE"
    hmac_key.update()
    hmac_key.delete()


def client_create_hmac_key(client, _preconditions, **_):
    client.create_hmac_key(service_account_email=_CONF_TEST_SERVICE_ACCOUNT_EMAIL)


def hmac_key_update(client, _preconditions, **resources):
    access_id = resources.get("hmac_key").access_id
    etag = resources.get("hmac_key").etag
    hmac_key = HMACKeyMetadata(client, access_id=access_id)
    if _preconditions:
        # Etag preconditions are not implemented in the library yet, so the
        # conditional variant of this scenario cannot be exercised.
        pytest.skip("Etag is not yet supported")
        hmac_key.etag = etag
    hmac_key.state = "INACTIVE"
    hmac_key.update()


def bucket_patch(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    metageneration = resources.get("bucket").metageneration
    bucket.storage_class = "COLDLINE"
    if _preconditions:
        bucket.patch(if_metageneration_match=metageneration)
    else:
        bucket.patch()


def bucket_update(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    metageneration = resources.get("bucket").metageneration
    bucket._properties = {"storageClass": "STANDARD"}
    if _preconditions:
        bucket.update(if_metageneration_match=metageneration)
    else:
        bucket.update()


def bucket_set_iam_policy(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    role = "roles/storage.objectViewer"
    member = _CONF_TEST_SERVICE_ACCOUNT_EMAIL
    policy = bucket.get_iam_policy(requested_policy_version=3)
    policy.bindings.append({"role": role, "members": {member}})
    if _preconditions:
        bucket.set_iam_policy(policy)
    else:
        # IAM policies have no metageneration: clear ETag to avoid checking that it matches.
        policy.etag = None
        bucket.set_iam_policy(policy)


def bucket_delete_blob(client, _preconditions, **resources):
    object = resources.get("object")
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        generation = object.generation
        bucket.delete_blob(object.name, if_generation_match=generation)
    else:
        bucket.delete_blob(object.name)


def bucket_delete_blobs(client, _preconditions, **resources):
    object = resources.get("object")
    bucket = client.bucket(resources.get("bucket").name)
    sources = [object]
    source_generations = [object.generation]
    if _preconditions:
        bucket.delete_blobs(sources, if_generation_match=source_generations)
    else:
        bucket.delete_blobs(sources)


def blob_delete(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    if _preconditions:
        blob.delete(if_generation_match=object.generation)
    else:
        blob.delete()


def blob_patch(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    blob.metadata = {"foo": "bar"}
    if _preconditions:
        blob.patch(if_metageneration_match=object.metageneration)
    else:
        blob.patch()


def blob_update(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    blob.metadata = {"foo": "bar"}
    if _preconditions:
        blob.update(if_metageneration_match=object.metageneration)
    else:
        blob.update()


def bucket_copy_blob(client, _preconditions, **resources):
    object = resources.get("object")
    bucket = client.bucket(resources.get("bucket").name)
    destination = client.create_bucket(uuid.uuid4().hex)
    if _preconditions:
        # if_generation_match=0 asserts the destination object does not exist.
        bucket.copy_blob(
            object, destination, new_name=uuid.uuid4().hex, if_generation_match=0
        )
    else:
        bucket.copy_blob(object, destination)


def bucket_rename_blob(client, _preconditions, **resources):
    object = resources.get("object")
    bucket = client.bucket(resources.get("bucket").name)
    blob = bucket.blob(resources.get("object").name)
    new_name = uuid.uuid4().hex
    if _preconditions:
        # Rename = copy (destination must not exist) + delete (source must
        # still be the generation we read).
        bucket.rename_blob(
            blob,
            new_name,
            if_generation_match=0,
            if_source_generation_match=object.generation,
        )
    else:
        bucket.rename_blob(blob, new_name)


def blob_rewrite(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    new_blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    new_blob.metadata = {"foo": "bar"}
    if _preconditions:
        new_blob.rewrite(object, if_generation_match=0)
    else:
        new_blob.rewrite(object)


def blob_update_storage_class(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    storage_class = "STANDARD"
    if _preconditions:
        blob.update_storage_class(storage_class, if_generation_match=object.generation)
    else:
        blob.update_storage_class(storage_class)


def blob_compose(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    blob_2 = bucket.blob(uuid.uuid4().hex)
    blob_2.upload_from_string(_STRING_CONTENT)
    sources = [blob_2]
    if _preconditions:
        blob.compose(sources, if_generation_match=object.generation)
    else:
        blob.compose(sources)


def blob_upload_from_string(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    _, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    # Small chunk size forces the resumable-upload code path.
    blob.chunk_size = _RESUMABLE_UPLOAD_CHUNK_SIZE
    if _preconditions:
        blob.upload_from_string(data, if_generation_match=0)
    else:
        blob.upload_from_string(data)
    assert blob.size == len(data)


def blob_upload_from_file(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    file_blob = client.bucket(bucket.name).blob(file.name)
    upload_blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    upload_blob.chunk_size = _RESUMABLE_UPLOAD_CHUNK_SIZE

    with tempfile.NamedTemporaryFile() as temp_f:
        # Create a named temporary file with payload.
        with open(temp_f.name, "wb") as file_obj:
            client.download_blob_to_file(file_blob, file_obj)
        # Upload the temporary file and assert data integrity.
        if _preconditions:
            upload_blob.upload_from_file(temp_f, if_generation_match=0)
        else:
            upload_blob.upload_from_file(temp_f)

    upload_blob.reload()
    assert upload_blob.size == len(data)


def blob_upload_from_filename(client, _preconditions, **resources):
    # NOTE(review): the first three lines below (bucket/blob/chunk_size) are
    # dead code — `blob` is never used; everything is re-derived via
    # upload_blob. Left as-is to keep this doc-only change byte-safe.
    bucket = resources.get("bucket")
    blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    blob.chunk_size = _RESUMABLE_UPLOAD_CHUNK_SIZE
    bucket = resources.get("bucket")
    file, data = resources.get("file_data")
    file_blob = client.bucket(bucket.name).blob(file.name)
    upload_blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    upload_blob.chunk_size = _RESUMABLE_UPLOAD_CHUNK_SIZE

    with tempfile.NamedTemporaryFile() as temp_f:
        # Create a named temporary file with payload.
        with open(temp_f.name, "wb") as file_obj:
            client.download_blob_to_file(file_blob, file_obj)
        # Upload the temporary file and assert data integrity.
        if _preconditions:
            upload_blob.upload_from_filename(temp_f.name, if_generation_match=0)
        else:
            upload_blob.upload_from_filename(temp_f.name)

    upload_blob.reload()
    assert upload_blob.size == len(data)


def blobwriter_write(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    _, data = resources.get("file_data")
    blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    if _preconditions:
        with blob.open(
            "w", chunk_size=_RESUMABLE_UPLOAD_CHUNK_SIZE, if_generation_match=0
        ) as writer:
            writer.write(data)
    else:
        with blob.open("w", chunk_size=_RESUMABLE_UPLOAD_CHUNK_SIZE) as writer:
            writer.write(data)
    blob.reload()
    assert blob.size == len(data)


def blobwriter_write_multipart(client, _preconditions, **resources):
    # Payload smaller than chunk_size -> single multipart request.
    chunk_size = 256 * 1024
    bucket = resources.get("bucket")
    blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    if _preconditions:
        with blob.open("wb", chunk_size=chunk_size, if_generation_match=0) as writer:
            writer.write(_BYTE_CONTENT)
    else:
        with blob.open("wb", chunk_size=chunk_size) as writer:
            writer.write(_BYTE_CONTENT)


def blob_upload_from_string_multipart(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    if _preconditions:
        blob.upload_from_string(_STRING_CONTENT, if_generation_match=0)
    else:
        blob.upload_from_string(_STRING_CONTENT)


def blob_create_resumable_upload_session(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    blob = client.bucket(bucket.name).blob(uuid.uuid4().hex)
    if _preconditions:
        blob.create_resumable_upload_session(if_generation_match=0)
    else:
        blob.create_resumable_upload_session()


def blob_make_private(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    if _preconditions:
        blob.make_private(if_metageneration_match=object.metageneration)
    else:
        blob.make_private()


def blob_make_public(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    if _preconditions:
        blob.make_public(if_metageneration_match=object.metageneration)
    else:
        blob.make_public()


def bucket_make_private(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        bucket.make_private(
            if_metageneration_match=resources.get("bucket").metageneration
        )
    else:
        bucket.make_private()


def bucket_make_public(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        bucket.make_public(
            if_metageneration_match=resources.get("bucket").metageneration
        )
    else:
        bucket.make_public()


def bucket_acl_reload(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.acl.reload()


def bucket_acl_save(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.acl.user(_CONF_TEST_SERVICE_ACCOUNT_EMAIL).grant_owner()
    if _preconditions:
        bucket.acl.save(if_metageneration_match=resources.get("bucket").metageneration)
    else:
        bucket.acl.save()


def bucket_acl_save_predefined(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        bucket.acl.save_predefined(
            "bucketOwnerFullControl",
            if_metageneration_match=resources.get("bucket").metageneration,
        )
    else:
        bucket.acl.save_predefined("bucketOwnerFullControl")


def bucket_acl_clear(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        bucket.acl.clear(if_metageneration_match=resources.get("bucket").metageneration)
    else:
        bucket.acl.clear()


def default_object_acl_reload(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.default_object_acl.reload()


def default_object_acl_save(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    bucket.default_object_acl.user(_CONF_TEST_SERVICE_ACCOUNT_EMAIL).grant_owner()
    if _preconditions:
        bucket.default_object_acl.save(
            if_metageneration_match=resources.get("bucket").metageneration
        )
    else:
        bucket.default_object_acl.save()


def default_object_acl_save_predefined(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        bucket.default_object_acl.save_predefined(
            "bucketOwnerFullControl",
            if_metageneration_match=resources.get("bucket").metageneration,
        )
    else:
        bucket.default_object_acl.save_predefined("bucketOwnerFullControl")


def default_object_acl_clear(client, _preconditions, **resources):
    bucket = client.bucket(resources.get("bucket").name)
    if _preconditions:
        bucket.default_object_acl.clear(
            if_metageneration_match=resources.get("bucket").metageneration
        )
    else:
        bucket.default_object_acl.clear()


def object_acl_reload(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    blob.acl.reload()


def object_acl_save(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    blob.acl.user(_CONF_TEST_SERVICE_ACCOUNT_EMAIL).grant_owner()
    if _preconditions:
        blob.acl.save(if_metageneration_match=object.metageneration)
    else:
        blob.acl.save()


def object_acl_save_predefined(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    if _preconditions:
        blob.acl.save_predefined(
            "bucketOwnerFullControl", if_metageneration_match=object.metageneration
        )
    else:
        blob.acl.save_predefined("bucketOwnerFullControl")


def object_acl_clear(client, _preconditions, **resources):
    bucket = resources.get("bucket")
    object = resources.get("object")
    blob = client.bucket(bucket.name).blob(object.name)
    if _preconditions:
        blob.acl.clear(if_metageneration_match=object.metageneration)
    else:
        blob.acl.clear()


########################################################################################################################################
### Method Invocation Mapping ##########################################################################################################
########################################################################################################################################

# Method invocation mapping is a map whose keys are a string describing a standard
# API call (e.g. storage.objects.get) and values are a list of functions which
# wrap library methods that implement these calls. There may be multiple values
# because multiple library methods may use the same call (e.g. get could be a
# read or just a metadata get).
method_mapping = {
    "storage.bucket_acl.list": [bucket_acl_reload],  # S1 start
    "storage.buckets.delete": [bucket_delete],
    "storage.buckets.get": [
        client_get_bucket,
        bucket_reload,
        client_lookup_bucket,
        bucket_exists,
    ],
    "storage.buckets.getIamPolicy": [bucket_get_iam_policy],
    "storage.buckets.insert": [client_create_bucket, bucket_create],
    "storage.buckets.list": [client_list_buckets],
    "storage.buckets.lockRetentionPolicy": [bucket_lock_retention_policy],
    "storage.buckets.testIamPermissions": [bucket_test_iam_permissions],
    "storage.default_object_acl.list": [default_object_acl_reload],
    "storage.hmacKey.delete": [hmac_key_delete],
    "storage.hmacKey.get": [
        client_get_hmac_key_metadata,
        hmac_key_exists,
        hmac_key_reload,
    ],
    "storage.hmacKey.list": [client_list_hmac_keys],
    "storage.notifications.delete": [notification_delete],
    "storage.notifications.get": [
        bucket_get_notification,
        notification_exists,
        notification_reload,
    ],
    "storage.notifications.list": [bucket_list_notifications],
    "storage.object_acl.list": [object_acl_reload],
    "storage.objects.get": [
        bucket_get_blob,
        blob_exists,
        client_download_blob_to_file,
        blob_download_to_filename,
        blob_download_to_filename_chunked,
        blob_download_as_bytes,
        blob_download_as_text,
        blobreader_read,
    ],
    "storage.objects.download": [
        client_download_blob_to_file,
        blob_download_to_filename,
        blob_download_to_filename_chunked,
        blob_download_as_bytes,
        blob_download_as_bytes_w_range,
        blob_download_as_text,
        blobreader_read,
    ],
    "storage.objects.list": [client_list_blobs, bucket_list_blobs, bucket_delete],
    "storage.serviceaccount.get": [client_get_service_account_email],  # S1 end
    "storage.buckets.patch": [
        bucket_patch,
        bucket_make_public,
        bucket_make_private,
        bucket_acl_save,
        bucket_acl_save_predefined,
        bucket_acl_clear,
        default_object_acl_save,
        default_object_acl_save_predefined,
        default_object_acl_clear,
    ],  # S2/S3 start
    "storage.buckets.setIamPolicy": [bucket_set_iam_policy],
    "storage.buckets.update": [bucket_update],
    "storage.hmacKey.update": [hmac_key_update],
    "storage.objects.compose": [blob_compose],
    "storage.objects.copy": [bucket_copy_blob, bucket_rename_blob],
    "storage.objects.delete": [
        bucket_delete_blob,
        bucket_delete_blobs,
        blob_delete,
        bucket_rename_blob,
    ],
    "storage.objects.insert": [
        blob_upload_from_string_multipart,
        blobwriter_write_multipart,
        blob_create_resumable_upload_session,
    ],
    "storage.resumable.upload": [
        blob_upload_from_string,
        blob_upload_from_file,
        blob_upload_from_filename,
        blobwriter_write,
    ],
    "storage.objects.patch": [
        blob_patch,
        object_acl_save,
        object_acl_save_predefined,
        object_acl_clear,
        blob_make_private,
        blob_make_public,
    ],
    "storage.objects.rewrite": [blob_rewrite, blob_update_storage_class],
    "storage.objects.update": [blob_update],  # S2/S3 end
    "storage.hmacKey.create": [client_create_hmac_key],  # S4 start
    "storage.notifications.insert": [notification_create],
}

########################################################################################################################################
###
# Helper Methods for Testbench Retry Test API ##########################################################################################
########################################################################################################################################

"""
The Retry Test API in the testbench is used to run the retry conformance tests.
It offers a mechanism to describe more complex retry scenarios while sending a
single, constant header through all the HTTP requests from a test program.
The Retry Test API can be accessed by adding the path "/retry-test" to the host.
See also: https://github.com/googleapis/storage-testbench
"""


def _create_retry_test(host, method_name, instructions):
    """
    For each test case, initialize a Retry Test resource by loading a set of
    instructions to the testbench host. The instructions include an API method
    and a list of errors. An unique id is created for each Retry Test resource.
    """
    import json

    payload = json.dumps({"instructions": {method_name: instructions}})
    response = requests.post(
        host + "/retry_test",
        headers={"Content-Type": "application/json"},
        data=payload,
    )
    return response.json()


def _get_retry_test(host, id):
    """
    Retrieve the state of the Retry Test resource, including the unique id,
    instructions, and a boolean status "completed". This can be used to verify
    if all instructions were used as expected.
    """
    status_uri = "{base}{retry}/{id}".format(base=host, retry="/retry_test", id=id)
    return requests.get(status_uri).json()


def _run_retry_test(
    host,
    id,
    lib_func,
    _preconditions,
    bucket,
    object,
    notification,
    hmac_key,
    file_data,
):
    """
    To execute tests against the list of instructions sent to the Retry Test
    API, create a client to send the retry test ID using the x-retry-test-id
    header in each request. For incoming requests that match the test ID and
    API method, the testbench will pop off the next instruction from the list
    and force the listed failure case.
    """
    client = storage.Client(
        project=_CONF_TEST_PROJECT_ID,
        credentials=AnonymousCredentials(),
        client_options={"api_endpoint": host},
    )
    # Tag every request issued by this client with the retry test id.
    client._http.headers.update({"x-retry-test-id": id})
    lib_func(
        client,
        _preconditions,
        bucket=bucket,
        object=object,
        notification=notification,
        hmac_key=hmac_key,
        file_data=file_data,
    )


def _delete_retry_test(host, id):
    """
    Delete the Retry Test resource by id.
    """
    delete_uri = "{base}{retry}/{id}".format(base=host, retry="/retry_test", id=id)
    requests.delete(delete_uri)


########################################################################################################################################
### Run Test Case for Retry Strategy ###################################################################################################
########################################################################################################################################


def run_test_case(
    scenario_id,
    method,
    case,
    lib_func,
    host,
    bucket,
    object,
    notification,
    hmac_key,
    file_data,
):
    scenario = _CONFORMANCE_TESTS[scenario_id - 1]
    expect_success = scenario["expectSuccess"]
    precondition_provided = scenario["preconditionProvided"]
    method_name = method["name"]
    instructions = case["instructions"]

    try:
        created = _create_retry_test(host, method_name, instructions)
        id = created["id"]
    except Exception as e:
        raise Exception(
            "Error creating retry test for {}: {}".format(method_name, e)
        ).with_traceback(e.__traceback__)

    # Run retry tests on library methods.
    try:
        _run_retry_test(
            host,
            id,
            lib_func,
            precondition_provided,
            bucket,
            object,
            notification,
            hmac_key,
            file_data,
        )
    except Exception as e:
        logging.exception(
            "Caught an exception while running retry instructions\n {}".format(e)
        )
        success_results = False
    else:
        success_results = True

    # Assert expected success for each scenario.
    assert (
        expect_success == success_results
    ), "Retry API call expected_success was {}, should be {}".format(
        success_results, expect_success
    )

    # Verify that all instructions were used up during the test
    # (indicates that the client sent the correct requests).
    status_response = _get_retry_test(host, id)
    assert (
        status_response["completed"] is True
    ), "Retry test not completed; unused instructions:{}".format(
        status_response["instructions"]
    )

    # Clean up and close out test in testbench.
    _delete_retry_test(host, id)


########################################################################################################################################
### Run Conformance Tests for Retry Strategy ###########################################################################################
########################################################################################################################################

# Pull storage-testbench docker image
subprocess.run(_PULL_CMD)
time.sleep(5)

# Run docker image to start storage-testbench
with subprocess.Popen(_RUN_CMD) as proc:
    # Register one pytest-discoverable test per (scenario, case, method, wrapper).
    for scenario in _CONFORMANCE_TESTS:
        id = scenario["id"]
        methods = scenario["methods"]
        cases = scenario["cases"]
        for i, c in enumerate(cases):
            for m in methods:
                method_name = m["name"]
                method_group = m["group"] if m.get("group", None) else m["name"]
                if method_group not in method_mapping:
                    logging.info("No tests for operation {}".format(method_name))
                    continue
                for lib_func in method_mapping[method_group]:
                    test_name = "test-S{}-{}-{}-{}".format(
                        id, method_name, lib_func.__name__, i
                    )
                    globals()[test_name] = functools.partial(
                        run_test_case, id, m, c, lib_func, _HOST
                    )
    time.sleep(5)
    proc.kill()
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( fix_xml_ampersands, orderedSet, parse_duration, qualities, strip_jsonp, unified_strdate, ExtractorError, ) class NPOBaseIE(InfoExtractor): def _get_token(self, video_id): token_page = self._download_webpage( 'http://ida.omroep.nl/npoplayer/i.js', video_id, note='Downloading token') token = self._search_regex( r'npoplayer\.token = "(.+?)"', token_page, 'token') # Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js token_l = list(token) first = second = None for i in range(5, len(token_l) - 4): if token_l[i].isdigit(): if first is None: first = i elif second is None: second = i if first is None or second is None: first = 12 second = 13 token_l[first], token_l[second] = token_l[second], token_l[first] return ''.join(token_l) class NPOIE(NPOBaseIE): IE_NAME = 'npo' IE_DESC = 'npo.nl and ntr.nl' _VALID_URL = r'''(?x) (?: npo:| https?:// (?:www\.)? 
(?: npo\.nl/(?!live|radio)(?:[^/]+/){2}| ntr\.nl/(?:[^/]+/){2,}| omroepwnl\.nl/video/fragment/[^/]+__ ) ) (?P<id>[^/?#]+) ''' _TESTS = [ { 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719', 'md5': '4b3f9c429157ec4775f2c9cb7b911016', 'info_dict': { 'id': 'VPWON_1220719', 'ext': 'm4v', 'title': 'Nieuwsuur', 'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.', 'upload_date': '20140622', }, }, { 'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800', 'md5': 'da50a5787dbfc1603c4ad80f31c5120b', 'info_dict': { 'id': 'VARA_101191800', 'ext': 'm4v', 'title': 'De Mega Mike & Mega Thomas show: The best of.', 'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4', 'upload_date': '20090227', 'duration': 2400, }, }, { 'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289', 'md5': 'f8065e4e5a7824068ed3c7e783178f2c', 'info_dict': { 'id': 'VPWON_1169289', 'ext': 'm4v', 'title': 'Tegenlicht: De toekomst komt uit Afrika', 'description': 'md5:52cf4eefbc96fffcbdc06d024147abea', 'upload_date': '20130225', 'duration': 3000, }, }, { 'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706', 'info_dict': { 'id': 'WO_VPRO_043706', 'ext': 'wmv', 'title': 'De nieuwe mens - Deel 1', 'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b', 'duration': 4680, }, 'params': { # mplayer mms download 'skip_download': True, } }, # non asf in streams { 'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771', 'md5': 'b3da13de374cbe2d5332a7e910bef97f', 'info_dict': { 'id': 'WO_NOS_762771', 'ext': 'mp4', 'title': 'Hoe gaat Europa verder na Parijs?', }, }, { 'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content', 'md5': '01c6a2841675995da1f0cf776f03a9c3', 'info_dict': { 'id': 'VPWON_1233944', 'ext': 'm4v', 'title': 'Aap, poot, pies', 'description': 'md5:c9c8005d1869ae65b858e82c01a91fde', 'upload_date': '20150508', 'duration': 599, }, }, { 'url': 
'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698', 'md5': 'd30cd8417b8b9bca1fdff27428860d08', 'info_dict': { 'id': 'POW_00996502', 'ext': 'm4v', 'title': '''"Dit is wel een 'landslide'..."''', 'description': 'md5:f8d66d537dfb641380226e31ca57b8e8', 'upload_date': '20150508', 'duration': 462, }, } ] def _real_extract(self, url): video_id = self._match_id(url) return self._get_info(video_id) def _get_info(self, video_id): metadata = self._download_json( 'http://e.omroep.nl/metadata/%s' % video_id, video_id, # We have to remove the javascript callback transform_source=strip_jsonp, ) # For some videos actual video id (prid) is different (e.g. for # http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698 # video id is POMS_WNL_853698 but prid is POW_00996502) video_id = metadata.get('prid') or video_id # titel is too generic in some cases so utilize aflevering_titel as well # when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html) title = metadata['titel'] sub_title = metadata.get('aflevering_titel') if sub_title and sub_title != title: title += ': %s' % sub_title token = self._get_token(video_id) formats = [] pubopties = metadata.get('pubopties') if pubopties: quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std']) for format_id in pubopties: format_info = self._download_json( 'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s' % (video_id, format_id, token), video_id, 'Downloading %s JSON' % format_id) if format_info.get('error_code', 0) or format_info.get('errorcode', 0): continue streams = format_info.get('streams') if streams: try: video_info = self._download_json( streams[0] + '&type=json', video_id, 'Downloading %s stream JSON' % format_id) except ExtractorError as ee: if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: error = (self._parse_json(ee.cause.read().decode(), video_id, 
fatal=False) or {}).get('errorstring') if error: raise ExtractorError(error, expected=True) raise else: video_info = format_info video_url = video_info.get('url') if not video_url: continue if format_id == 'adaptive': formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4')) else: formats.append({ 'url': video_url, 'format_id': format_id, 'quality': quality(format_id), }) streams = metadata.get('streams') if streams: for i, stream in enumerate(streams): stream_url = stream.get('url') if not stream_url: continue if '.asf' not in stream_url: formats.append({ 'url': stream_url, 'quality': stream.get('kwaliteit'), }) continue asx = self._download_xml( stream_url, video_id, 'Downloading stream %d ASX playlist' % i, transform_source=fix_xml_ampersands) ref = asx.find('./ENTRY/Ref') if ref is None: continue video_url = ref.get('href') if not video_url: continue formats.append({ 'url': video_url, 'ext': stream.get('formaat', 'asf'), 'quality': stream.get('kwaliteit'), }) self._sort_formats(formats) subtitles = {} if metadata.get('tt888') == 'ja': subtitles['nl'] = [{ 'ext': 'vtt', 'url': 'http://e.omroep.nl/tt888/%s' % video_id, }] return { 'id': video_id, 'title': title, 'description': metadata.get('info'), 'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'], 'upload_date': unified_strdate(metadata.get('gidsdatum')), 'duration': parse_duration(metadata.get('tijdsduur')), 'formats': formats, 'subtitles': subtitles, } class NPOLiveIE(NPOBaseIE): IE_NAME = 'npo.nl:live' _VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)' _TEST = { 'url': 'http://www.npo.nl/live/npo-1', 'info_dict': { 'id': 'LI_NEDERLAND1_136692', 'display_id': 'npo-1', 'ext': 'mp4', 'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'Livestream', 'is_live': True, }, 'params': { 'skip_download': True, } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) live_id = 
self._search_regex( r'data-prid="([^"]+)"', webpage, 'live id') metadata = self._download_json( 'http://e.omroep.nl/metadata/%s' % live_id, display_id, transform_source=strip_jsonp) token = self._get_token(display_id) formats = [] streams = metadata.get('streams') if streams: for stream in streams: stream_type = stream.get('type').lower() # smooth streaming is not supported if stream_type in ['ss', 'ms']: continue stream_info = self._download_json( 'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp' % (stream.get('url'), token), display_id, 'Downloading %s JSON' % stream_type) if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0): continue stream_url = self._download_json( stream_info['stream'], display_id, 'Downloading %s URL' % stream_type, 'Unable to download %s URL' % stream_type, transform_source=strip_jsonp, fatal=False) if not stream_url: continue if stream_type == 'hds': f4m_formats = self._extract_f4m_formats(stream_url, display_id) # f4m downloader downloads only piece of live stream for f4m_format in f4m_formats: f4m_format['preference'] = -1 formats.extend(f4m_formats) elif stream_type == 'hls': formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4')) else: formats.append({ 'url': stream_url, 'preference': -10, }) self._sort_formats(formats) return { 'id': live_id, 'display_id': display_id, 'title': self._live_title(metadata['titel']), 'description': metadata['info'], 'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'], 'formats': formats, 'is_live': True, } class NPORadioIE(InfoExtractor): IE_NAME = 'npo.nl:radio' _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$' _TEST = { 'url': 'http://www.npo.nl/radio/radio-1', 'info_dict': { 'id': 'radio-1', 'ext': 'mp3', 'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { 'skip_download': True, } } @staticmethod def _html_get_attribute_regex(attribute): return 
r'{0}\s*=\s*\'([^\']+)\''.format(attribute) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( self._html_get_attribute_regex('data-channel'), webpage, 'title') stream = self._parse_json( self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'), video_id) codec = stream.get('codec') return { 'id': video_id, 'url': stream['url'], 'title': self._live_title(title), 'acodec': codec, 'ext': codec, 'is_live': True, } class NPORadioFragmentIE(InfoExtractor): IE_NAME = 'npo.nl:radio:fragment' _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)' _TEST = { 'url': 'http://www.npo.nl/radio/radio-5/fragment/174356', 'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2', 'info_dict': { 'id': '174356', 'ext': 'mp3', 'title': 'Jubileumconcert Willeke Alberti', }, } def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage(url, audio_id) title = self._html_search_regex( r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id, webpage, 'title') audio_url = self._search_regex( r"data-streams='([^']+)'", webpage, 'audio url') return { 'id': audio_id, 'url': audio_url, 'title': title, } class SchoolTVIE(InfoExtractor): IE_NAME = 'schooltv' _VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://www.schooltv.nl/video/ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam/', 'info_dict': { 'id': 'WO_NTR_429477', 'display_id': 'ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam', 'title': 'Ademhaling: De hele dag haal je adem. 
Maar wat gebeurt er dan eigenlijk in je lichaam?', 'ext': 'mp4', 'description': 'md5:abfa0ff690adb73fd0297fd033aaa631' }, 'params': { # Skip because of m3u8 download 'skip_download': True } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( r'data-mid=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video_id', group='id') return { '_type': 'url_transparent', 'ie_key': 'NPO', 'url': 'npo:%s' % video_id, 'display_id': display_id } class NPOPlaylistBaseIE(NPOIE): def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result('npo:%s' % video_id if not video_id.startswith('http') else video_id) for video_id in orderedSet(re.findall(self._PLAYLIST_ENTRY_RE, webpage)) ] playlist_title = self._html_search_regex( self._PLAYLIST_TITLE_RE, webpage, 'playlist title', default=None) or self._og_search_title(webpage) return self.playlist_result(entries, playlist_id, playlist_title) class VPROIE(NPOPlaylistBaseIE): IE_NAME = 'vpro' _VALID_URL = r'https?://(?:www\.)?(?:(?:tegenlicht\.)?vpro|2doc)\.nl/(?:[^/]+/)*(?P<id>[^/]+)\.html' _PLAYLIST_TITLE_RE = (r'<h1[^>]+class=["\'].*?\bmedia-platform-title\b.*?["\'][^>]*>([^<]+)', r'<h5[^>]+class=["\'].*?\bmedia-platform-subtitle\b.*?["\'][^>]*>([^<]+)') _PLAYLIST_ENTRY_RE = r'data-media-id="([^"]+)"' _TESTS = [ { 'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html', 'md5': 'f8065e4e5a7824068ed3c7e783178f2c', 'info_dict': { 'id': 'VPWON_1169289', 'ext': 'm4v', 'title': 'De toekomst komt uit Afrika', 'description': 'md5:52cf4eefbc96fffcbdc06d024147abea', 'upload_date': '20130225', }, 'skip': 'Video gone', }, { 'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html', 'info_dict': { 'id': 'sergio-herman', 'title': 'sergio herman: fucking perfect', }, 'playlist_count': 2, }, { # playlist with youtube embed 'url': 
'http://www.vpro.nl/programmas/2doc/2015/education-education.html', 'info_dict': { 'id': 'education-education', 'title': 'education education', }, 'playlist_count': 2, }, { 'url': 'http://www.2doc.nl/documentaires/series/2doc/2015/oktober/de-tegenprestatie.html', 'info_dict': { 'id': 'de-tegenprestatie', 'title': 'De Tegenprestatie', }, 'playlist_count': 2, }, { 'url': 'http://www.2doc.nl/speel~VARA_101375237~mh17-het-verdriet-van-nederland~.html', 'info_dict': { 'id': 'VARA_101375237', 'ext': 'm4v', 'title': 'MH17: Het verdriet van Nederland', 'description': 'md5:09e1a37c1fdb144621e22479691a9f18', 'upload_date': '20150716', }, 'params': { # Skip because of m3u8 download 'skip_download': True }, } ] class WNLIE(NPOPlaylistBaseIE): IE_NAME = 'wnl' _VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+' _PLAYLIST_TITLE_RE = r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>' _PLAYLIST_ENTRY_RE = r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>Deel \d+' _TESTS = [{ 'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515', 'info_dict': { 'id': 'vandaag-de-dag-6-mei', 'title': 'Vandaag de Dag 6 mei', }, 'playlist_count': 4, }] class AndereTijdenIE(NPOPlaylistBaseIE): IE_NAME = 'anderetijden' _VALID_URL = r'https?://(?:www\.)?anderetijden\.nl/programma/(?:[^/]+/)+(?P<id>[^/?#&]+)' _PLAYLIST_TITLE_RE = r'(?s)<h1[^>]+class=["\'].*?\bpage-title\b.*?["\'][^>]*>(.+?)</h1>' _PLAYLIST_ENTRY_RE = r'<figure[^>]+class=["\']episode-container episode-page["\'][^>]+data-prid=["\'](.+?)["\']' _TESTS = [{ 'url': 'http://anderetijden.nl/programma/1/Andere-Tijden/aflevering/676/Duitse-soldaten-over-de-Slag-bij-Arnhem', 'info_dict': { 'id': 'Duitse-soldaten-over-de-Slag-bij-Arnhem', 'title': 'Duitse soldaten over de Slag bij Arnhem', }, 'playlist_count': 3, }]
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import array from abc import ABCMeta import copy from typing import ( Any, Callable, cast, Generic, List, Optional, overload, TypeVar, Union, TYPE_CHECKING, ) import numpy as np from py4j.java_gateway import JavaObject from pyspark.ml.linalg import DenseVector, Vector, Matrix from pyspark.ml.util import Identifiable if TYPE_CHECKING: from pyspark.ml._typing import ParamMap __all__ = ["Param", "Params", "TypeConverters"] T = TypeVar("T") P = TypeVar("P", bound="Params") class Param(Generic[T]): """ A param with self-contained documentation. .. versionadded:: 1.3.0 """ def __init__( self, parent: Identifiable, name: str, doc: str, typeConverter: Optional[Callable[[Any], T]] = None, ): if not isinstance(parent, Identifiable): raise TypeError("Parent must be an Identifiable but got type %s." 
% type(parent)) self.parent = parent.uid self.name = str(name) self.doc = str(doc) self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter def _copy_new_parent(self, parent: Any) -> "Param": """Copy the current param to a new parent, must be a dummy param.""" if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent) def __str__(self) -> str: return str(self.parent) + "__" + self.name def __repr__(self) -> str: return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc) def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, Param): return self.parent == other.parent and self.name == other.name else: return False class TypeConverters: """ Factory methods for common type conversion functions for `Param.typeConverter`. .. versionadded:: 2.0.0 """ @staticmethod def _is_numeric(value: Any) -> bool: vtype = type(value) return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == "long" @staticmethod def _is_integer(value: Any) -> bool: return TypeConverters._is_numeric(value) and float(value).is_integer() @staticmethod def _can_convert_to_list(value: Any) -> bool: vtype = type(value) return vtype in [list, np.ndarray, tuple, range, array.array] or isinstance(value, Vector) @staticmethod def _can_convert_to_string(value: Any) -> bool: vtype = type(value) return isinstance(value, str) or vtype in [np.unicode_, np.string_, np.str_] @staticmethod def identity(value: "T") -> "T": """ Dummy converter that just returns value. """ return value @staticmethod def toList(value: Any) -> List: """ Convert a value to a list, if possible. 
""" if type(value) == list: return value elif type(value) in [np.ndarray, tuple, range, array.array]: return list(value) elif isinstance(value, Vector): return list(value.toArray()) else: raise TypeError("Could not convert %s to list" % value) @staticmethod def toListFloat(value: Any) -> List[float]: """ Convert a value to list of floats, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return [float(v) for v in value] raise TypeError("Could not convert %s to list of floats" % value) @staticmethod def toListListFloat(value: Any) -> List[List[float]]: """ Convert a value to list of list of floats, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) return [TypeConverters.toListFloat(v) for v in value] raise TypeError("Could not convert %s to list of list of floats" % value) @staticmethod def toListInt(value: Any) -> List[int]: """ Convert a value to list of ints, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value) @staticmethod def toListString(value: Any) -> List[str]: """ Convert a value to list of strings, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)): return [TypeConverters.toString(v) for v in value] raise TypeError("Could not convert %s to list of strings" % value) @staticmethod def toVector(value: Any) -> Vector: """ Convert a value to a MLlib Vector, if possible. 
""" if isinstance(value, Vector): return value elif TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return DenseVector(value) raise TypeError("Could not convert %s to vector" % value) @staticmethod def toMatrix(value: Any) -> Matrix: """ Convert a value to a MLlib Matrix, if possible. """ if isinstance(value, Matrix): return value raise TypeError("Could not convert %s to matrix" % value) @staticmethod def toFloat(value: Any) -> float: """ Convert a value to a float, if possible. """ if TypeConverters._is_numeric(value): return float(value) else: raise TypeError("Could not convert %s to float" % value) @staticmethod def toInt(value: Any) -> int: """ Convert a value to an int, if possible. """ if TypeConverters._is_integer(value): return int(value) else: raise TypeError("Could not convert %s to int" % value) @staticmethod def toString(value: Any) -> str: """ Convert a value to a string, if possible. """ if isinstance(value, str): return value elif type(value) in [np.string_, np.str_, np.unicode_]: return str(value) else: raise TypeError("Could not convert %s to string type" % type(value)) @staticmethod def toBoolean(value: Any) -> bool: """ Convert a value to a boolean, if possible. """ if type(value) == bool: return value else: raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value)) class Params(Identifiable, metaclass=ABCMeta): """ Components that take parameters. This also provides an internal param map to store parameter values attached to the instance. .. 
versionadded:: 1.3.0 """ def __init__(self) -> None: super(Params, self).__init__() #: internal param map for user-supplied values param map self._paramMap: "ParamMap" = {} #: internal param map for default values self._defaultParamMap: "ParamMap" = {} #: value returned by :py:func:`params` self._params: Optional[List[Param]] = None # Copy the params from the class to the object self._copy_params() def _copy_params(self) -> None: """ Copy all params defined on the class to current object. """ cls = type(self) src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)] src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs)) for name, param in src_params: setattr(self, name, param._copy_new_parent(self)) @property def params(self) -> List[Param]: """ Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`. """ if self._params is None: self._params = list( filter( lambda attr: isinstance(attr, Param), [ getattr(self, x) for x in dir(self) if x != "params" and not isinstance(getattr(type(self), x, None), property) ], ) ) return self._params def explainParam(self, param: Union[str, Param]) -> str: """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """ param = cast(Param, self._resolveParam(param)) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr) def explainParams(self) -> str: """ Returns the documentation of all params with their optionally default values and user-supplied values. 
""" return "\n".join([self.explainParam(param) for param in self.params]) def getParam(self, paramName: str) -> Param: """ Gets a param by its name. """ param = getattr(self, paramName) if isinstance(param, Param): return param else: raise ValueError("Cannot find param with name %s." % paramName) def isSet(self, param: Union[str, Param[Any]]) -> bool: """ Checks whether a param is explicitly set by user. """ param = self._resolveParam(param) return param in self._paramMap def hasDefault(self, param: Union[str, Param[Any]]) -> bool: """ Checks whether a param has a default value. """ param = self._resolveParam(param) return param in self._defaultParamMap def isDefined(self, param: Union[str, Param[Any]]) -> bool: """ Checks whether a param is explicitly set by user or has a default value. """ return self.isSet(param) or self.hasDefault(param) def hasParam(self, paramName: str) -> bool: """ Tests whether this instance contains a param with a given (string) name. """ if isinstance(paramName, str): p = getattr(self, paramName, None) return isinstance(p, Param) else: raise TypeError("hasParam(): paramName must be a string") @overload def getOrDefault(self, param: str) -> Any: ... @overload def getOrDefault(self, param: Param[T]) -> T: ... def getOrDefault(self, param: Union[str, Param[T]]) -> Union[Any, T]: """ Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set. """ param = self._resolveParam(param) if param in self._paramMap: return self._paramMap[param] else: return self._defaultParamMap[param] def extractParamMap(self, extra: Optional["ParamMap"] = None) -> "ParamMap": """ Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. 
Parameters ---------- extra : dict, optional extra param values Returns ------- dict merged param map """ if extra is None: extra = dict() paramMap = self._defaultParamMap.copy() paramMap.update(self._paramMap) paramMap.update(extra) return paramMap def copy(self: P, extra: Optional["ParamMap"] = None) -> P: """ Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. Parameters ---------- extra : dict, optional Extra parameters to copy to the new instance Returns ------- :py:class:`Params` Copy of this instance """ if extra is None: extra = dict() that = copy.copy(self) that._paramMap = {} that._defaultParamMap = {} return self._copyValues(that, extra) def set(self, param: Param, value: Any) -> None: """ Sets a parameter in the embedded param map. """ self._shouldOwn(param) try: value = param.typeConverter(value) except ValueError as e: raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e)) self._paramMap[param] = value def _shouldOwn(self, param: Param) -> None: """ Validates that the input param belongs to this Params instance. """ if not (self.uid == param.parent and self.hasParam(param.name)): raise ValueError("Param %r does not belong to %r." % (param, self)) def _resolveParam(self, param: Union[str, Param]) -> Param: """ Resolves a param and validates the ownership. Parameters ---------- param : str or :py:class:`Param` param name or the param instance, which must belong to this Params instance Returns ------- :py:class:`Param` resolved param instance """ if isinstance(param, Param): self._shouldOwn(param) return param elif isinstance(param, str): return self.getParam(param) else: raise TypeError("Cannot resolve %r as a param." 
% param) def _testOwnParam(self, param_parent: str, param_name: str) -> bool: """ Test the ownership. Return True or False """ return self.uid == param_parent and self.hasParam(param_name) @staticmethod def _dummy() -> "Params": """ Returns a dummy Params instance used as a placeholder to generate docs. """ dummy = Params() dummy.uid = "undefined" return dummy def _set(self: P, **kwargs: Any) -> P: """ Sets user-supplied params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None: try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e)) self._paramMap[p] = value return self def clear(self, param: Param) -> None: """ Clears a param from the param map if it has been explicitly set. """ if self.isSet(param): del self._paramMap[param] def _setDefault(self: P, **kwargs: Any) -> P: """ Sets default params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None and not isinstance(value, JavaObject): try: value = p.typeConverter(value) except TypeError as e: raise TypeError( 'Invalid default param value given for param "%s". %s' % (p.name, e) ) self._defaultParamMap[p] = value return self def _copyValues(self, to: P, extra: Optional["ParamMap"] = None) -> P: """ Copies param values from this instance to another instance for params shared by them. 
Parameters ---------- to : :py:class:`Params` the target instance extra : dict, optional extra params to be copied Returns ------- :py:class:`Params` the target instance with param values copied """ paramMap = self._paramMap.copy() if isinstance(extra, dict): for param, value in extra.items(): if isinstance(param, Param): paramMap[param] = value else: raise TypeError( "Expecting a valid instance of Param, but received: {}".format(param) ) elif extra is not None: raise TypeError( "Expecting a dict, but received an object of type {}.".format(type(extra)) ) for param in self.params: # copy default params if param in self._defaultParamMap and to.hasParam(param.name): to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param] # copy explicitly set params if param in paramMap and to.hasParam(param.name): to._set(**{param.name: paramMap[param]}) return to def _resetUid(self: P, newUid: Any) -> P: """ Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). Parameters ---------- newUid new uid to use, which is converted to unicode Returns ------- :py:class:`Params` same instance, but with the uid and Param.parent values updated, including within param maps """ newUid = str(newUid) self.uid = newUid newDefaultParamMap = dict() newParamMap = dict() for param in self.params: newParam = copy.copy(param) newParam.parent = newUid if param in self._defaultParamMap: newDefaultParamMap[newParam] = self._defaultParamMap[param] if param in self._paramMap: newParamMap[newParam] = self._paramMap[param] param.parent = newUid self._defaultParamMap = newDefaultParamMap self._paramMap = newParamMap return self
# -*- coding: utf-8 -*-
"""Parser for Linux utmp files."""

from __future__ import unicode_literals

from dfdatetime import posix_time as dfdatetime_posix_time

from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import dtfabric_parser
from plaso.parsers import manager


class UtmpEventData(events.EventData):
  """utmp event data.

  Attributes:
    exit_status (int): exit status.
    hostname (str): hostname or IP address.
    ip_address (str): IP address from the connection.
    pid (int): process identifier (PID).
    terminal_identifier (int): inittab identifier.
    terminal (str): type of terminal.
    type (int): type of login.
    username (str): user name.
  """

  DATA_TYPE = 'linux:utmp:event'

  def __init__(self):
    """Initializes event data."""
    super(UtmpEventData, self).__init__(data_type=self.DATA_TYPE)
    self.exit_status = None
    self.hostname = None
    self.ip_address = None
    self.pid = None
    self.terminal_identifier = None
    self.terminal = None
    self.type = None
    self.username = None


class UtmpParser(dtfabric_parser.DtFabricBaseParser):
  """Parser for Linux libc6 utmp files."""

  NAME = 'utmp'
  DATA_FORMAT = 'Linux libc6 utmp file'

  _DEFINITION_FILE = 'utmp.yaml'

  # All-zero packed address used to detect that only the first 4 bytes of
  # the stored address are significant (IPv4 entry).
  _EMPTY_IP_ADDRESS = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

  _SUPPORTED_TYPES = frozenset(range(0, 10))

  # A dead process entry may legitimately have an empty username.
  _DEAD_PROCESS_TYPE = 8

  def _ReadStringValue(
      self, byte_stream, encoding, description, warning_strings):
    """Reads a NUL-terminated string from a fixed-size byte field.

    Args:
      byte_stream (bytes): raw field value, NUL-padded.
      encoding (str): codec used to decode the string.
      description (str): name of the field, used in warning messages.
      warning_strings (list[str]): warning messages, appended to when the
          value cannot be decoded.

    Returns:
      str: decoded string or None if the value could not be decoded.
    """
    try:
      string_value = byte_stream.split(b'\x00')[0]
      return string_value.decode(encoding)
    except UnicodeDecodeError:
      warning_strings.append(
          'unable to decode {0:s} string'.format(description))
      return None

  def _ReadEntry(self, parser_mediator, file_object, file_offset):
    """Reads an utmp entry.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.
      file_offset (int): offset of the data relative to the start of
          the file-like object.

    Returns:
      tuple: containing:

        int: timestamp, which contains the number of microseconds
            since January 1, 1970, 00:00:00 UTC.
        UtmpEventData: event data of the utmp entry read.
        list[str]: warning messages emitted by the parser.

    Raises:
      ParseError: if the entry cannot be parsed.
    """
    entry_map = self._GetDataTypeMap('linux_libc6_utmp_entry')

    warning_strings = []

    try:
      entry, _ = self._ReadStructureFromFileObject(
          file_object, file_offset, entry_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError((
          'Unable to parse utmp entry at offset: 0x{0:08x} with error: '
          '{1!s}.').format(file_offset, exception))

    if entry.type not in self._SUPPORTED_TYPES:
      raise errors.ParseError('Unsupported type: {0:d}'.format(entry.type))

    encoding = parser_mediator.codepage or 'utf-8'

    username = self._ReadStringValue(
        entry.username, encoding, 'username', warning_strings)

    terminal = self._ReadStringValue(
        entry.terminal, encoding, 'terminal', warning_strings)
    # The tilde terminal marks entries written at system boot.
    if terminal == '~':
      terminal = 'system boot'

    hostname = self._ReadStringValue(
        entry.hostname, encoding, 'hostname', warning_strings)
    # ':0' is the local X display; normalize empty values as well.
    if not hostname or hostname == ':0':
      hostname = 'localhost'

    # When the upper 12 bytes are zero the entry holds an IPv4 address in
    # the first 4 bytes, otherwise the full 16 bytes are an IPv6 address.
    if entry.ip_address[4:] == self._EMPTY_IP_ADDRESS[4:]:
      ip_address = self._FormatPackedIPv4Address(entry.ip_address[:4])
    else:
      ip_address = self._FormatPackedIPv6Address(entry.ip_address)

    # TODO: add termination status.
    event_data = UtmpEventData()
    event_data.hostname = hostname
    event_data.exit_status = entry.exit_status
    event_data.ip_address = ip_address
    event_data.offset = file_offset
    event_data.pid = entry.pid
    event_data.terminal = terminal
    event_data.terminal_identifier = entry.terminal_identifier
    event_data.type = entry.type
    event_data.username = username

    timestamp = entry.microseconds + (
        entry.timestamp * definitions.MICROSECONDS_PER_SECOND)
    return timestamp, event_data, warning_strings

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses an utmp file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    file_offset = 0

    # The first entry acts as a format signature check: it must parse
    # cleanly, have a timestamp and (unless a dead process) a username.
    try:
      timestamp, event_data, warning_strings = self._ReadEntry(
          parser_mediator, file_object, file_offset)
    except errors.ParseError as exception:
      raise errors.UnableToParseFile(
          'Unable to parse first utmp entry with error: {0!s}'.format(
              exception))

    if not timestamp:
      raise errors.UnableToParseFile(
          'Unable to parse first utmp entry with error: missing timestamp')

    if not event_data.username and event_data.type != self._DEAD_PROCESS_TYPE:
      raise errors.UnableToParseFile(
          'Unable to parse first utmp entry with error: missing username')

    if warning_strings:
      all_warnings = ', '.join(warning_strings)
      raise errors.UnableToParseFile(
          'Unable to parse first utmp entry with error: {0:s}'.format(
              all_warnings))

    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_START)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    file_offset = file_object.tell()
    file_size = file_object.get_size()

    while file_offset < file_size:
      if parser_mediator.abort:
        break

      try:
        timestamp, event_data, warning_strings = self._ReadEntry(
            parser_mediator, file_object, file_offset)
      except errors.ParseError:
        # Note that the utmp file can contain trailing data.
        break

      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
          timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_START)
      parser_mediator.ProduceEventWithEventData(event, event_data)

      for warning_string in warning_strings:
        parser_mediator.ProduceExtractionWarning(warning_string)

      file_offset = file_object.tell()


manager.ParsersManager.RegisterParser(UtmpParser)
""" Support for Blink4home cameras. For more details about this component, please refer to the documentation at https://home-assistant.io/components/blink4home/ """ import asyncio import logging from datetime import timedelta import json import requests import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import CONF_USERNAME, CONF_PASSWORD _CONFIGURING = {} _LOGGER = logging.getLogger(__name__) ATTRIBUTION = 'Blink4Home camera support' MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5) CONF_NETWORK_ID = 'network_id' DOMAIN = 'blink4home' DATA_BLINK = 'blink4home' API_URL = 'https://rest.prir.immedia-semi.com' CLIENT_SPECIFIER = 'Home-Assistant | ' HEADERS = {'Content-Type': 'application/json'} TOKEN_HEADER = 'TOKEN_AUTH' UNAUTH_ACCESS = 'Unauthorized access' CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_NETWORK_ID, default=0): cv.positive_int, }) }, extra=vol.ALLOW_EXTRA) @asyncio.coroutine def async_setup(hass, config): """Setting up the platform.""" blink_config = config.get(DOMAIN, {}) username = blink_config.get(CONF_USERNAME) password = blink_config.get(CONF_PASSWORD) network = blink_config.get(CONF_NETWORK_ID) version = hass.config.as_dict()['version'] def arm_blink(call): """Arm the system.""" blink = hass.data[DATA_BLINK] blink.arm() def disarm_blink(call): """Disarm the system.""" blink = hass.data[DATA_BLINK] blink.disarm() blink = Blink4Home(username, password, version, network) # Store data hass.data[DATA_BLINK] = blink # Add service hass.services.async_register(DOMAIN, 'arm', arm_blink) hass.services.async_register(DOMAIN, 'disarm', disarm_blink) return blink.logged_in class Blink4Home(object): """Blink4home api.""" def __init__(self, username, password, version, network): """Init the Blink4Home api.""" self._username = username self._password = password self._version = version self._api_key = "" 
self._network_id = "" self._network = network self._armed = False self._notifications = 0 self._logged_in = False # Login self._login() @property def logged_in(self): """Return the name of the sensor.""" return self._logged_in @property def notifications(self): """Return the amount of notifications.""" return self._notifications @property def state(self): """Return the state.""" return self._armed def _login(self, force=False): """Perform login.""" if not self._api_key or force: self._api_key = '' url = (API_URL + '/login') data = {'password': self._password, 'client_specifier': CLIENT_SPECIFIER + str(self._version), 'email': self._username} _LOGGER.debug('Sending request with: %s', json.dumps(data)) response = requests.post(url, data=json.dumps(data), headers=HEADERS, timeout=10) if response.status_code == 200: _LOGGER.debug('Received login response: %s', response.text) result = response.json() self._api_key = result['authtoken']['authtoken'] _LOGGER.debug('Got api-key: %s', self._api_key) networks = result['networks'] found = False for key, value in networks.items(): _LOGGER.debug('Network: %s, value: %s', key, value) # choose network from config or # the first one (maybe the only one) if not found and \ (self._network == 0 or str(self._network) == key): self._network_id = key found = True if found: break if found: self._logged_in = True self.update() _LOGGER.debug('Api key: %s', json.dumps(self._api_key)) _LOGGER.debug('Selected network: %s', json.dumps(self._network_id)) else: self._api_key = '' _LOGGER.debug('Received error response: %s', response.status_code) _LOGGER.error('Error logging in to the Blink4Home platform. ' 'Received status was %s.', response.status_code) def _do_post(self, url, data='', second_try=False): if not self._logged_in or not self._api_key: self._login(True) if not self._api_key: _LOGGER.error('Couldn\'t arm system. 
There was ' 'a problem with the login.') headers = HEADERS headers[TOKEN_HEADER] = self._api_key response = requests.post(url, data=data, headers=headers, timeout=10) if not response.status_code == 200: if response.status_code == 401 and not second_try: _LOGGER.debug('Token not valid: %s', response.status_code) self._login(True) self._do_post(url=url, data=data, second_try=True) else: _LOGGER.debug('Received error response on post: %s', response.text) _LOGGER.error('Error with the Blink4Home ' 'platform. Received status was %s.', response.status_code) return response def _do_get(self, url, second_try=False): if not self._logged_in or not self._api_key: self._login(True) if not self._api_key: _LOGGER.error('Couldn\'t arm system. ' 'There was a problem with the login.') headers = HEADERS headers[TOKEN_HEADER] = self._api_key response = requests.get(url, headers=headers, timeout=10) if not response.status_code == 200: if response.status_code == 401 and not second_try: _LOGGER.debug('Token not valid: %s', response.status_code) self._login(True) self._do_get(url=url, second_try=True) else: _LOGGER.debug('Received error response on get: %s', response.text) _LOGGER.error('Error with the Blink4Home ' 'platform. Received status was %s.', response.status_code) return response def arm(self): """Arm the system.""" _LOGGER.debug('Arming the system') response = self._do_post(API_URL + '/network/' + str(self._network_id) + '/arm') if response.status_code == 200: _LOGGER.debug('Received arm response: %s', response.text) self.update() else: _LOGGER.debug('Received error response on update: %s', response.text) _LOGGER.error('Error arming the Blink4Home ' 'platform. 
Received status was %s.', response.status_code) def disarm(self, second_try=False): """Disarm the system.""" _LOGGER.debug('Disarming the system') response = self._do_post(API_URL + '/network/' + str(self._network_id) + '/disarm') if response.status_code == 200: _LOGGER.debug('Received disarm response: %s', response.text) self.update() else: _LOGGER.debug('Received error response on update: %s', response.text) _LOGGER.error('Error disarming the Blink4Home ' 'platform. Received status was %s.', response.status_code) def update(self, second_try=False): """Update the status.""" _LOGGER.debug('Updating the system') response = self._do_get(API_URL + '/homescreen') if response.status_code == 200: _LOGGER.debug('Received update response: %s', response.text) result = response.json() self._armed = result['network']['armed'] self._notifications = result['network']['notifications'] else: _LOGGER.debug('Received error response on update: %s', response.text) _LOGGER.error('Error updating the Blink4Home ' 'sensor. Received status was %s.', response.status_code)
#!/usr/bin/env python2
# This file is part of the OpenMV project.
#
# Copyright (c) 2013-2021 Ibrahim Abdelkader <iabdalkader@openmv.io>
# Copyright (c) 2013-2021 Kwabena W. Agyeman <kwagyeman@openmv.io>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# Openmv module with support for multiple cams.
#
# All functions take the port name as a key; the serial handle for port
# __port[i] is __serial[i]. Communication failures are swallowed and
# reported by returning None (best-effort, by design).
import struct
import sys,time
import serial
import platform
import numpy as np
from PIL import Image

__serial = []
__port = []
__FB_HDR_SIZE =12

# USB Debug commands
__USBDBG_CMD            = 48
__USBDBG_FW_VERSION     = 0x80
__USBDBG_FRAME_SIZE     = 0x81
__USBDBG_FRAME_DUMP     = 0x82
__USBDBG_ARCH_STR       = 0x83
__USBDBG_SCRIPT_EXEC    = 0x05
__USBDBG_SCRIPT_STOP    = 0x06
__USBDBG_SCRIPT_SAVE    = 0x07
__USBDBG_SCRIPT_RUNNING = 0x87
__USBDBG_TEMPLATE_SAVE  = 0x08
__USBDBG_DESCRIPTOR_SAVE= 0x09
__USBDBG_ATTR_READ      = 0x8A
__USBDBG_ATTR_WRITE     = 0x0B
__USBDBG_SYS_RESET      = 0x0C
__USBDBG_FB_ENABLE      = 0x0D
__USBDBG_TX_BUF_LEN     = 0x8E
__USBDBG_TX_BUF         = 0x8F

ATTR_CONTRAST   =0
ATTR_BRIGHTNESS =1
ATTR_SATURATION =2
ATTR_GAINCEILING=3

# Bootloader commands
__BOOTLDR_START = 0xABCD0001
__BOOTLDR_RESET = 0xABCD0002
__BOOTLDR_ERASE = 0xABCD0004
__BOOTLDR_WRITE = 0xABCD0008

def init(port, baudrate=921600, timeout=0.3):
    """Open the camera's CDC serial port and register it."""
    global __serial
    global __port
    # open CDC port
    __serial.append(serial.Serial(port, baudrate=baudrate, timeout=timeout))
    __port.append(port)

def disconnect(port):
    """Close and unregister a camera port; no-op if not registered."""
    global __serial
    global __port
    try:
        idx = __port.index(port)
        __serial[idx].close()
        __serial.pop(idx)
        __port.pop(idx)
    except:
        pass

def set_timeout(port, timeout):
    """Change the serial read timeout of a registered port."""
    try:
        idx = __port.index(port)
        __serial[idx].timeout = timeout
    except:
        pass

def fb_size(port):
    """Return the framebuffer header (width, height, depth) or None."""
    # read fb header
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_FRAME_SIZE, __FB_HDR_SIZE))
        return struct.unpack("III", __serial[idx].read(12))
    except:
        return None

def fb_dump(port):
    """Read one frame and return (width, height, RGB ndarray) or None."""
    try:
        idx = __port.index(port)
        size = fb_size(port)

        if size is None or not size[0]:
            # frame not ready (or header read failed)
            return None

        if (size[2] > 2): #JPEG
            num_bytes = size[2]
        else:
            num_bytes = size[0]*size[1]*size[2]

        # read fb data
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_FRAME_DUMP, num_bytes))
        buff = __serial[idx].read(num_bytes)

        if size[2] == 1: # Grayscale: replicate luma into R, G and B.
            y = np.fromstring(buff, dtype=np.uint8)
            buff = np.column_stack((y, y, y))
        elif size[2] == 2: # RGB565: expand each channel to 8 bit.
            arr = np.fromstring(buff, dtype=np.uint16).newbyteorder('S')
            r = (((arr & 0xF800) >>11)*255.0/31.0).astype(np.uint8)
            g = (((arr & 0x07E0) >>5) *255.0/63.0).astype(np.uint8)
            b = (((arr & 0x001F) >>0) *255.0/31.0).astype(np.uint8)
            buff = np.column_stack((r,g,b))
        else: # JPEG
            try:
                buff = np.asarray(Image.frombuffer("RGB", size[0:2], buff,
                                                   "jpeg", "RGB", ""))
            except Exception as e:
                print ("JPEG decode error (%s)"%(e))
                return None

        if (buff.size != (size[0]*size[1]*3)):
            return None

        return (size[0], size[1], buff.reshape((size[1], size[0], 3)))
    except:
        return None

def exec_script(port, buf):
    """Upload and execute a MicroPython script on the camera."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_SCRIPT_EXEC, len(buf)))
        __serial[idx].write(buf.encode())
    except:
        pass

def stop_script(port):
    """Stop the currently running script."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_SCRIPT_STOP, 0))
    except:
        pass

def script_running(port):
    """Return non-zero when a script is running, or None on error."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_SCRIPT_RUNNING, 4))
        return struct.unpack("I", __serial[idx].read(4))[0]
    except:
        return None

def save_template(port, x, y, w, h, path):
    """Save the region (x, y, w, h) of the frame as a template at path."""
    try:
        idx = __port.index(port)
        buf = struct.pack("IIII", x, y, w, h) + path
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_TEMPLATE_SAVE, len(buf)))
        __serial[idx].write(buf)
    except:
        pass

def save_descriptor(port, x, y, w, h, path):
    """Save a keypoints descriptor of the region (x, y, w, h) at path."""
    try:
        idx = __port.index(port)
        buf = struct.pack("HHHH", x, y, w, h) + path
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_DESCRIPTOR_SAVE, len(buf)))
        __serial[idx].write(buf)
    except:
        pass

def set_attr(port, attr, value):
    """Write a sensor attribute (one of the ATTR_* constants)."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_ATTR_WRITE, 8))
        __serial[idx].write(struct.pack("<II", attr, value))
    except:
        pass

def get_attr(port, attr):
    """Read a sensor attribute; returns 1 raw byte or None."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBIh", __USBDBG_CMD,
                                        __USBDBG_ATTR_READ, 1, attr))
        return __serial[idx].read(1)
    except:
        return None

def reset(port):
    """Reset the camera."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_SYS_RESET, 0))
    except:
        pass

def bootloader_start(port):
    """Enter the bootloader; returns True on acknowledgement."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<I", __BOOTLDR_START))
        return struct.unpack("I", __serial[idx].read(4))[0] == __BOOTLDR_START
    except:
        pass

def bootloader_reset(port):
    """Reset out of the bootloader."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<I", __BOOTLDR_RESET))
    except:
        pass

def flash_erase(port, sector):
    """Erase a flash sector via the bootloader."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<II", __BOOTLDR_ERASE, sector))
    except:
        pass

def flash_write(port, buf):
    """Write a chunk of firmware via the bootloader."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<I", __BOOTLDR_WRITE) + buf)
    except:
        pass

def tx_buf_len(port):
    """Return the number of bytes waiting in the camera's TX buffer."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_TX_BUF_LEN, 4))
        return struct.unpack("I", __serial[idx].read(4))[0]
    except:
        return None

def tx_buf(port, bytes):
    """Read `bytes` bytes from the camera's TX (print output) buffer.

    NOTE(review): the parameter shadows the `bytes` builtin; kept for
    backward compatibility with keyword callers.
    """
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_TX_BUF, bytes))
        return __serial[idx].read(bytes)
    except:
        return None

def fw_version(port):
    """Return the firmware version as (major, minor, patch) or None."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_FW_VERSION, 12))
        return struct.unpack("III", __serial[idx].read(12))
    except:
        return None

def enable_fb(port, enable):
    """Enable or disable framebuffer streaming."""
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_FB_ENABLE, 4))
        __serial[idx].write(struct.pack("<I", enable))
    except:
        pass

def arch_str(port=None):
    """Return the camera's architecture string or None.

    Fix: the original signature took no arguments but referenced an
    undefined `port`, so it always raised NameError into the bare except
    and returned None. The parameter defaults to None so legacy
    zero-argument calls still return None as before.
    """
    try:
        idx = __port.index(port)
        __serial[idx].write(struct.pack("<BBI", __USBDBG_CMD,
                                        __USBDBG_ARCH_STR, 64))
        # The reply is NUL-padded; keep only the leading string.
        return __serial[idx].read(64).split(b'\0', 1)[0]
    except:
        return None

if __name__ == '__main__':
    if len(sys.argv)!= 3:
        print ('usage: pyopenmv.py <port> <script>')
        sys.exit(1)

    with open(sys.argv[2], 'r') as fin:
        buf = fin.read()

    portname = sys.argv[1]

    disconnect(portname)
    init(portname)
    stop_script(portname)
    exec_script(portname, buf)
    # Give the script time to produce output before draining the TX buffer.
    tx_len = tx_buf_len(portname)
    time.sleep(0.250)
    if (tx_len):
        print(tx_buf(portname, tx_len).decode())
    disconnect(portname)
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from msrest import Deserializer, Serializer if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import TokenCredential from azure.core.pipeline.transport import HttpRequest, HttpResponse from ._configuration import NetworkManagementClientConfiguration from .operations import ApplicationGatewaysOperations from .operations import ApplicationSecurityGroupsOperations from .operations import AvailableDelegationsOperations from .operations import AvailableResourceGroupDelegationsOperations from .operations import AzureFirewallsOperations from .operations import AzureFirewallFqdnTagsOperations from .operations import BastionHostsOperations from .operations import NetworkManagementClientOperationsMixin from .operations import DdosCustomPoliciesOperations from .operations import DdosProtectionPlansOperations from .operations import AvailableEndpointServicesOperations from .operations import ExpressRouteCircuitAuthorizationsOperations from .operations import ExpressRouteCircuitPeeringsOperations from .operations import ExpressRouteCircuitConnectionsOperations from .operations import PeerExpressRouteCircuitConnectionsOperations from .operations import ExpressRouteCircuitsOperations from .operations import ExpressRouteServiceProvidersOperations from .operations import ExpressRouteCrossConnectionsOperations from .operations import 
ExpressRouteCrossConnectionPeeringsOperations from .operations import ExpressRouteGatewaysOperations from .operations import ExpressRouteConnectionsOperations from .operations import ExpressRoutePortsLocationsOperations from .operations import ExpressRoutePortsOperations from .operations import ExpressRouteLinksOperations from .operations import FirewallPoliciesOperations from .operations import FirewallPolicyRuleGroupsOperations from .operations import LoadBalancersOperations from .operations import LoadBalancerBackendAddressPoolsOperations from .operations import LoadBalancerFrontendIPConfigurationsOperations from .operations import InboundNatRulesOperations from .operations import LoadBalancerLoadBalancingRulesOperations from .operations import LoadBalancerOutboundRulesOperations from .operations import LoadBalancerNetworkInterfacesOperations from .operations import LoadBalancerProbesOperations from .operations import NatGatewaysOperations from .operations import NetworkInterfacesOperations from .operations import NetworkInterfaceIPConfigurationsOperations from .operations import NetworkInterfaceLoadBalancersOperations from .operations import NetworkInterfaceTapConfigurationsOperations from .operations import NetworkProfilesOperations from .operations import NetworkSecurityGroupsOperations from .operations import SecurityRulesOperations from .operations import DefaultSecurityRulesOperations from .operations import NetworkWatchersOperations from .operations import PacketCapturesOperations from .operations import ConnectionMonitorsOperations from .operations import Operations from .operations import PrivateEndpointsOperations from .operations import AvailablePrivateEndpointTypesOperations from .operations import PrivateLinkServicesOperations from .operations import PublicIPAddressesOperations from .operations import PublicIPPrefixesOperations from .operations import RouteFiltersOperations from .operations import RouteFilterRulesOperations from .operations import 
RouteTablesOperations
from .operations import RoutesOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from .operations import ServiceTagsOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import ResourceNavigationLinksOperations
from .operations import ServiceAssociationLinksOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualNetworkTapsOperations
from .operations import VirtualRoutersOperations
from .operations import VirtualRouterPeeringsOperations
from .operations import VirtualWansOperations
from .operations import VpnSitesOperations
from .operations import VpnSiteLinksOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import VpnSiteLinkConnectionsOperations
from .operations import VpnLinkConnectionsOperations
from .operations import P2SVpnServerConfigurationsOperations
from .operations import P2SVpnGatewaysOperations
from .operations import WebApplicationFirewallPoliciesOperations
from . import models


class NetworkManagementClient(NetworkManagementClientOperationsMixin):
    """Network Client.

    Every operation group of the 2019-07-01 network API surface is exposed
    as an instance attribute. The attribute name and its operations class
    are listed, in creation order, in the ``operation_groups`` table inside
    ``__init__``; for example ``client.virtual_networks`` is a
    :class:`~azure.mgmt.network.v2019_07_01.operations.VirtualNetworksOperations`
    instance, and every other entry follows the same naming pattern
    (snake_case attribute -> ``<PascalCase>Operations`` class from
    ``azure.mgmt.network.v2019_07_01.operations``).

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The subscription credentials which uniquely
     identify the Microsoft Azure subscription. The subscription ID forms
     part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls
     for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # (attribute name, operations class) pairs. This table replaces 84
        # copies of the identical two-line assignment the generator emitted.
        # Ordering mirrors the generated code so attribute-creation order
        # (and thus __dict__ iteration order) is unchanged.
        operation_groups = [
            ('application_gateways', ApplicationGatewaysOperations),
            ('application_security_groups', ApplicationSecurityGroupsOperations),
            ('available_delegations', AvailableDelegationsOperations),
            ('available_resource_group_delegations', AvailableResourceGroupDelegationsOperations),
            ('azure_firewalls', AzureFirewallsOperations),
            ('azure_firewall_fqdn_tags', AzureFirewallFqdnTagsOperations),
            ('bastion_hosts', BastionHostsOperations),
            ('ddos_custom_policies', DdosCustomPoliciesOperations),
            ('ddos_protection_plans', DdosProtectionPlansOperations),
            ('available_endpoint_services', AvailableEndpointServicesOperations),
            ('express_route_circuit_authorizations', ExpressRouteCircuitAuthorizationsOperations),
            ('express_route_circuit_peerings', ExpressRouteCircuitPeeringsOperations),
            ('express_route_circuit_connections', ExpressRouteCircuitConnectionsOperations),
            ('peer_express_route_circuit_connections', PeerExpressRouteCircuitConnectionsOperations),
            ('express_route_circuits', ExpressRouteCircuitsOperations),
            ('express_route_service_providers', ExpressRouteServiceProvidersOperations),
            ('express_route_cross_connections', ExpressRouteCrossConnectionsOperations),
            ('express_route_cross_connection_peerings', ExpressRouteCrossConnectionPeeringsOperations),
            ('express_route_gateways', ExpressRouteGatewaysOperations),
            ('express_route_connections', ExpressRouteConnectionsOperations),
            ('express_route_ports_locations', ExpressRoutePortsLocationsOperations),
            ('express_route_ports', ExpressRoutePortsOperations),
            ('express_route_links', ExpressRouteLinksOperations),
            ('firewall_policies', FirewallPoliciesOperations),
            ('firewall_policy_rule_groups', FirewallPolicyRuleGroupsOperations),
            ('load_balancers', LoadBalancersOperations),
            ('load_balancer_backend_address_pools', LoadBalancerBackendAddressPoolsOperations),
            ('load_balancer_frontend_ip_configurations', LoadBalancerFrontendIPConfigurationsOperations),
            ('inbound_nat_rules', InboundNatRulesOperations),
            ('load_balancer_load_balancing_rules', LoadBalancerLoadBalancingRulesOperations),
            ('load_balancer_outbound_rules', LoadBalancerOutboundRulesOperations),
            ('load_balancer_network_interfaces', LoadBalancerNetworkInterfacesOperations),
            ('load_balancer_probes', LoadBalancerProbesOperations),
            ('nat_gateways', NatGatewaysOperations),
            ('network_interfaces', NetworkInterfacesOperations),
            ('network_interface_ip_configurations', NetworkInterfaceIPConfigurationsOperations),
            ('network_interface_load_balancers', NetworkInterfaceLoadBalancersOperations),
            ('network_interface_tap_configurations', NetworkInterfaceTapConfigurationsOperations),
            ('network_profiles', NetworkProfilesOperations),
            ('network_security_groups', NetworkSecurityGroupsOperations),
            ('security_rules', SecurityRulesOperations),
            ('default_security_rules', DefaultSecurityRulesOperations),
            ('network_watchers', NetworkWatchersOperations),
            ('packet_captures', PacketCapturesOperations),
            ('connection_monitors', ConnectionMonitorsOperations),
            ('operations', Operations),
            ('private_endpoints', PrivateEndpointsOperations),
            ('available_private_endpoint_types', AvailablePrivateEndpointTypesOperations),
            ('private_link_services', PrivateLinkServicesOperations),
            ('public_ip_addresses', PublicIPAddressesOperations),
            ('public_ip_prefixes', PublicIPPrefixesOperations),
            ('route_filters', RouteFiltersOperations),
            ('route_filter_rules', RouteFilterRulesOperations),
            ('route_tables', RouteTablesOperations),
            ('routes', RoutesOperations),
            ('bgp_service_communities', BgpServiceCommunitiesOperations),
            ('service_endpoint_policies', ServiceEndpointPoliciesOperations),
            ('service_endpoint_policy_definitions', ServiceEndpointPolicyDefinitionsOperations),
            ('service_tags', ServiceTagsOperations),
            ('usages', UsagesOperations),
            ('virtual_networks', VirtualNetworksOperations),
            ('subnets', SubnetsOperations),
            ('resource_navigation_links', ResourceNavigationLinksOperations),
            ('service_association_links', ServiceAssociationLinksOperations),
            ('virtual_network_peerings', VirtualNetworkPeeringsOperations),
            ('virtual_network_gateways', VirtualNetworkGatewaysOperations),
            ('virtual_network_gateway_connections', VirtualNetworkGatewayConnectionsOperations),
            ('local_network_gateways', LocalNetworkGatewaysOperations),
            ('virtual_network_taps', VirtualNetworkTapsOperations),
            ('virtual_routers', VirtualRoutersOperations),
            ('virtual_router_peerings', VirtualRouterPeeringsOperations),
            ('virtual_wans', VirtualWansOperations),
            ('vpn_sites', VpnSitesOperations),
            ('vpn_site_links', VpnSiteLinksOperations),
            ('vpn_sites_configuration', VpnSitesConfigurationOperations),
            ('virtual_hubs', VirtualHubsOperations),
            ('hub_virtual_network_connections', HubVirtualNetworkConnectionsOperations),
            ('vpn_gateways', VpnGatewaysOperations),
            ('vpn_connections', VpnConnectionsOperations),
            ('vpn_site_link_connections', VpnSiteLinkConnectionsOperations),
            ('vpn_link_connections', VpnLinkConnectionsOperations),
            ('p2_svpn_server_configurations', P2SVpnServerConfigurationsOperations),
            ('p2_svpn_gateways', P2SVpnGatewaysOperations),
            ('web_application_firewall_policies', WebApplicationFirewallPoliciesOperations),
        ]
        # Every operation group shares the same pipeline client,
        # configuration and (de)serializers, exactly as the generated
        # per-attribute assignments did.
        for attr_name, operations_class in operation_groups:
            setattr(self, attr_name, operations_class(
                self._client, self._config, self._serialize, self._deserialize))

    def _send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        """
        # The subscription id is baked into every request URL template.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    def close(self):
        # type: () -> None
        """Close the underlying pipeline client and its transport."""
        self._client.close()

    def __enter__(self):
        # type: () -> NetworkManagementClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for protein models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import gzip import json import os import tarfile from typing import (Callable, List, Optional, Text) import urllib import numpy as np import tensorflow.compat.v1 as tf # tf from tensorflow.contrib import lookup as contrib_lookup import tqdm AMINO_ACID_VOCABULARY = [ 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y' ] _PFAM_GAP_CHARACTER = '.' # Other characters representing amino-acids not in AMINO_ACID_VOCABULARY. _ADDITIONAL_AA_VOCABULARY = [ # Substitutions 'U', 'O', # Ambiguous Characters 'B', 'Z', 'X', # Gap Character _PFAM_GAP_CHARACTER ] # Vocab of all possible tokens in a valid input sequence FULL_RESIDUE_VOCAB = AMINO_ACID_VOCABULARY + _ADDITIONAL_AA_VOCABULARY # Map AA characters to their index in FULL_RESIDUE_VOCAB. 
_RESIDUE_TO_INT = {aa: idx for idx, aa in enumerate(FULL_RESIDUE_VOCAB)}

# Public GCS bucket holding released ProteinInfer model checkpoints.
OSS_ZIPPED_MODELS_ROOT_URL = 'https://storage.googleapis.com/brain-genomics-public/research/proteins/proteinfer/models/zipped_models/'
_OSS_PFAM_ZIPPED_MODELS_URL_BASE = OSS_ZIPPED_MODELS_ROOT_URL + 'noxpd2_cnn_swissprot_pfam_random_swiss-cnn_for_swissprot_pfam_random-'
_OSS_EC_ZIPPED_MODELS_URL_BASE = OSS_ZIPPED_MODELS_ROOT_URL + 'noxpd2_cnn_swissprot_ec_random_swiss-cnn_for_swissprot_ec_random-'
_OSS_GO_ZIPPED_MODELS_URL_BASE = OSS_ZIPPED_MODELS_ROOT_URL + 'noxpd2_cnn_swissprot_go_random_swiss-cnn_for_swissprot_go_random-'

# Cap on how many ensemble elements are combined at inference time.
MAX_NUM_ENSEMBLE_ELS_FOR_INFERENCE = 5

PARENTHOOD_FILE_URL = 'https://storage.googleapis.com/brain-genomics-public/research/proteins/proteinfer/colab_support/parenthood.json.gz'
LABEL_DESCRIPTION_URL = 'https://storage.googleapis.com/brain-genomics-public/research/proteins/proteinfer/colab_support/label_descriptions.json.gz'
INSTALLED_PARENTHOOD_FILE_NAME = 'parenthood.json.gz'
INSTALLED_LABEL_DESCRIPTION_FILE_NAME = 'label_descriptions.json.gz'

# Experiment ids of the released ensemble elements for each task; each id
# names one downloadable checkpoint (see the *_ZIPPED_MODELS_URLS lists below).
# pyformat: disable
PFAM_RANDOM_ENSEMBLE_ELEMENT_EXPERIMENT_IDS = [
    '13703743', '13703976', '13704038', '13704097', '13704156',
    '13705318', '13705635', '13705680', '13705733', '13705759',
    '13705805', '13706336', '13707555', '13707708', '13707739',
    '13707862', '13708715', '13708866', '13709033', '13709258',
    '13709363', '13709600', '13709998', '13710430', '13711765',
    '13729975', '13730021', '13730128', '13730776', '13730885',
    '13731191', '13731551', '13731565', '13731695', '13732031',
]

EC_RANDOM_ENSEMBLE_ELEMENT_EXPERIMENT_IDS = [
    '13703966', '13704083', '13704104', '13704130', '13705280',
    '13705675', '13705786', '13705802', '13705819', '13705839',
    '13706239', '13706986', '13707020', '13707589', '13707925',
    '13708369', '13708672', '13708706', '13708740', '13708951',
    '13709242', '13709584', '13709983', '13710037', '13711670',
    '13729344', '13730041', '13730097', '13730679', '13730876',
    '13730909', '13731218', '13731588', '13731728', '13731976',
]

GO_RANDOM_ENSEMBLE_ELEMENT_EXPERIMENT_IDS = [
    '13703706', '13703742', '13703997', '13704131', '13705631',
    '13705668', '13705677', '13705689', '13705708', '13705728',
    '13706170', '13706215', '13707414', '13707438', '13707732',
    '13708169', '13708676', '13708925', '13708995', '13709052',
    '13709428', '13709589', '13710370', '13710418', '13711677',
    '13729352', '13730011', '13730387', '13730746', '13730766',
    '13730958', '13731179', '13731598', '13731645', '13732022',
]
# pyformat: enable

OSS_PFAM_ZIPPED_MODELS_URLS = [
    '{}{}.tar.gz'.format(_OSS_PFAM_ZIPPED_MODELS_URL_BASE, p)
    for p in PFAM_RANDOM_ENSEMBLE_ELEMENT_EXPERIMENT_IDS
]
OSS_EC_ZIPPED_MODELS_URLS = [
    '{}{}.tar.gz'.format(_OSS_EC_ZIPPED_MODELS_URL_BASE, p)
    for p in EC_RANDOM_ENSEMBLE_ELEMENT_EXPERIMENT_IDS
]
OSS_GO_ZIPPED_MODELS_URLS = [
    '{}{}.tar.gz'.format(_OSS_GO_ZIPPED_MODELS_URL_BASE, p)
    for p in GO_RANDOM_ENSEMBLE_ELEMENT_EXPERIMENT_IDS
]


def residues_to_indices(amino_acid_residues):
  """Maps each character of a residue string to its FULL_RESIDUE_VOCAB index.

  Raises KeyError for any character outside FULL_RESIDUE_VOCAB.
  """
  return [_RESIDUE_TO_INT[c] for c in amino_acid_residues]


def normalize_sequence_to_blosum_characters(seq):
  """Make substitutions, since blosum62 doesn't include amino acids U and O.

  We take the advice from here for the appropriate substitutions:
  https://www.cgl.ucsf.edu/chimera/docs/ContributedSoftware/multalignviewer/multalignviewer.html

  Args:
    seq: amino acid sequence. A string.

  Returns:
    An amino acid sequence string that's compatible with the blosum
    substitution matrix.
  """
  return seq.replace('U', 'C').replace('O', 'X')


# Cached: the table is deterministic and reused by every call to
# residues_to_one_hot and fasta_encoder.
@functools.lru_cache(maxsize=1)
def _build_one_hot_encodings():
  """Create array of one-hot embeddings.

  Row `i` of the returned array corresponds to the one-hot embedding of amino
  acid FULL_RESIDUE_VOCAB[i].

  Returns:
    np.array of shape `[len(FULL_RESIDUE_VOCAB), 20]`.
  """
  # Identity rows for the 20 standard amino acids.
  base_encodings = np.eye(len(AMINO_ACID_VOCABULARY))
  to_aa_index = AMINO_ACID_VOCABULARY.index
  special_mappings = {
      # Ambiguity codes spread mass over their possible residues:
      # B is D or N, Z is E or Q, X is any amino acid.
      'B':
          .5 *
          (base_encodings[to_aa_index('D')] + base_encodings[to_aa_index('N')]
          ),
      'Z':
          .5 *
          (base_encodings[to_aa_index('E')] + base_encodings[to_aa_index('Q')]
          ),
      'X':
          np.ones(len(AMINO_ACID_VOCABULARY)) / len(AMINO_ACID_VOCABULARY),
      # Gaps map to the zero vector.
      _PFAM_GAP_CHARACTER:
          np.zeros(len(AMINO_ACID_VOCABULARY)),
  }
  # Rare residues: selenocysteine (U) is encoded as cysteine; pyrrolysine (O)
  # is treated as fully ambiguous.
  special_mappings['U'] = base_encodings[to_aa_index('C')]
  special_mappings['O'] = special_mappings['X']
  special_encodings = np.array(
      [special_mappings[c] for c in _ADDITIONAL_AA_VOCABULARY])
  return np.concatenate((base_encodings, special_encodings), axis=0)


def residues_to_one_hot(amino_acid_residues):
  """Given a sequence of amino acids, return one hot array.

  Supports ambiguous amino acid characters B, Z, and X by distributing evenly
  over possible values, e.g. an 'X' gets mapped to [.05, .05, ... , .05].

  Supports rare amino acids by appropriately substituting. See
  normalize_sequence_to_blosum_characters for more information.

  Supports gaps and pads with the '.' and '-' characters; which are mapped to
  the zero vector.
  NOTE(review): only '.' is actually present in FULL_RESIDUE_VOCAB, so a '-'
  raises KeyError here — confirm whether '-' should be added to the vocab or
  this docstring claim removed.

  Args:
    amino_acid_residues: string. consisting of characters from
      AMINO_ACID_VOCABULARY

  Returns:
    A numpy array of shape (len(amino_acid_residues),
     len(AMINO_ACID_VOCABULARY)).

  Raises:
    KeyError: if amino_acid_residues has a character not in
      FULL_RESIDUE_VOCAB.
  """
  residue_encodings = _build_one_hot_encodings()
  int_sequence = residues_to_indices(amino_acid_residues)
  return residue_encodings[int_sequence]


def fasta_indexer():
  """Get a function for converting tokenized protein strings to indices."""
  mapping = tf.constant(FULL_RESIDUE_VOCAB)
  table = contrib_lookup.index_table_from_tensor(mapping)

  def mapper(residues):
    # Lookup is applied to the ragged tensor's flat values so row structure
    # is preserved.
    return tf.ragged.map_flat_values(table.lookup, residues)

  return mapper


def fasta_encoder():
  """Get a function for converting indexed amino acids to one-hot encodings."""
  # Row i of the gather table is the one-hot encoding of FULL_RESIDUE_VOCAB[i],
  # matching the indices produced by fasta_indexer().
  encoded = residues_to_one_hot(''.join(FULL_RESIDUE_VOCAB))
  one_hot_embeddings = tf.constant(encoded, dtype=tf.float32)

  def mapper(residues):
    return tf.ragged.map_flat_values(
        tf.gather, indices=residues, params=one_hot_embeddings)

  return mapper


def in_graph_residues_to_onehot(residues):
  """Performs mapping in `residues_to_one_hot` in-graph.

  Args:
    residues: A tf.RaggedTensor with tokenized residues.

  Returns:
    A tuple of tensors (one_hots, row_lengths):
    `one_hots` is a
      Tensor<shape=[None, None, len(AMINO_ACID_VOCABULARY)], dtype=tf.float32>
      that contains a one_hot encoding of the residues and pads out all the
      residues to the max sequence length in the batch by 0s.
    `row_lengths` is a Tensor<shape=[None], dtype=tf.int32> with the length
      of the unpadded sequences from residues.

  Raises:
    tf.errors.InvalidArgumentError: if `residues` contains a token not in
      `FULL_RESIDUE_VOCAB`.
  """
  ragged_one_hots = fasta_encoder()(fasta_indexer()(residues))
  return (ragged_one_hots.to_tensor(default_value=0),
          tf.cast(ragged_one_hots.row_lengths(), dtype=tf.int32))


def calculate_bucket_batch_sizes(bucket_boundaries, max_expected_sequence_size,
                                 largest_batch_size):
  """Calculated batch sizes for each bucket given a set of boundaries.

  Sequences in the smallest sized bucket will get a batch_size of
  largest_batch_size and larger buckets will have smaller batch sizes in
  proportion to their maximum sequence length to ensure that they do not use
  too much memory. E.g.
for bucket_boundaries of [5, 10, 20, 40], max_expected_size of 100 and largest_batch_size of 50, expected_bucket_sizes are [50, 25, 12, 6, 2]. Args: bucket_boundaries: list of positions of bucket boundaries max_expected_sequence_size: largest expected sequence, used to calculate sizes largest_batch_size: batch_size for largest batches. Returns: batch_sizes as list """ first_max_size = bucket_boundaries[0] bucket_relative_batch_sizes = [ (first_max_size / x) for x in bucket_boundaries + [max_expected_sequence_size] ] bucket_absolute_batch_sizes = [ int(x * largest_batch_size) for x in bucket_relative_batch_sizes ] if min(bucket_absolute_batch_sizes) == 0: raise ValueError( 'There would be a batch size of 0 during bucketing, which is not ' 'allowed. Bucket boundaries passed in were: %s, leading to batch sizes of: %s' % (bucket_boundaries, bucket_absolute_batch_sizes)) return bucket_absolute_batch_sizes def batch_iterable(iterable, batch_size): """Yields batches from an iterable. If the number of elements in the iterator is not a multiple of batch size, the last batch will have fewer elements. Args: iterable: a potentially infinite iterable. batch_size: the size of batches to return. Yields: array of length batch_size, containing elements, in order, from iterable. Raises: ValueError: if batch_size < 1. """ if batch_size < 1: raise ValueError( 'Cannot have a batch size of less than 1. Received: {}'.format( batch_size)) current = [] for item in iterable: if len(current) == batch_size: yield current current = [] current.append(item) # Prevent yielding an empty batch. Instead, prefer to end the generation. if current: yield current def pad_one_hot(one_hot, length): if length < one_hot.shape[0]: raise ValueError("The padding value must be longer than the one-hot's 0th " 'dimension. 
Padding value is ' + str(length) + ' ' 'and one-hot shape is ' + str(one_hot.shape)) padding = np.zeros((length - one_hot.shape[0], len(AMINO_ACID_VOCABULARY))) return np.append(one_hot, padding, axis=0) def make_padded_np_array(ragged_arrays): """Converts ragged array of one-hot amino acids to constant-length np.array. Args: ragged_arrays: list of list of int. Each entry in the list is a one-hot encoded protein, where each entry corresponds to an amino acid. Returns: np.array of int, shape (len(ragged_arrays), len(longest_array_in_ragged_arrays), len(AMINO_ACID_VOCABULARY)). """ max_array_length = max(len(a) for a in ragged_arrays) return np.array([ pad_one_hot(ragged_array, max_array_length) for ragged_array in ragged_arrays ]) def absolute_paths_of_files_in_dir(dir_path): files = os.listdir(dir_path) return sorted([os.path.join(dir_path, f) for f in files]) def load_gz_json(path): with open(path, 'rb') as f: with gzip.GzipFile(fileobj=f, mode='rb') as gzip_file: return json.load(gzip_file) def fetch_oss_pretrained_models( model_type, output_dir_path, num_ensemble_elements = None): """Fetch, unzip, and untar a number of models to output_dir_path. Does not store the tar.gz versions, just the unzipped ones. Args: model_type: one of Pfam, EC, or GO. output_dir_path: output directory to which ensemble elements should be written. num_ensemble_elements: number of elements to fetch. If None, fetch all available. Raises: ValueError if model_type is invalid, or num_ensemble_elements is too large. """ if model_type.lower() == 'pfam': absolute_model_urls = OSS_PFAM_ZIPPED_MODELS_URLS elif model_type.lower() == 'ec': absolute_model_urls = OSS_EC_ZIPPED_MODELS_URLS elif model_type.lower() == 'go': absolute_model_urls = OSS_GO_ZIPPED_MODELS_URLS else: raise ValueError( 'Given model type {} was not valid. 
Valid model types are {}'.format( model_type, ['Pfam', 'EC', 'GO'])) num_ensemble_elements = num_ensemble_elements if num_ensemble_elements is not None else len( absolute_model_urls) if num_ensemble_elements > len(absolute_model_urls): raise ValueError( 'Requested {} ensemble elements, but only {} were available.'.format( num_ensemble_elements, len(absolute_model_urls))) absolute_model_urls = absolute_model_urls[:num_ensemble_elements] for absolute_url in tqdm.tqdm( absolute_model_urls, desc='Downloading and unzipping {} models to {}'.format( model_type, output_dir_path), position=0, leave=True): # TODO(mlbileschi): consider parallelizing to make faster. relative_file_name = os.path.basename(os.path.normpath(absolute_url)) output_path = os.path.join(output_dir_path, relative_file_name) with urllib.request.urlopen(absolute_url) as url_contents: with tarfile.open(fileobj=url_contents, mode='r|gz') as tar: tar.extractall(output_dir_path)
import copy
import unicodedata
from JumpScale import j
import traceback
try:
    import ujson as json
except ImportError:
    import json
import JumpScale.baselib.hash

# Numeric level -> human readable name (see j.enumerators.ErrorConditionLevel).
LEVELMAP = {1: 'CRITICAL', 2: 'WARNING', 3: 'INFO', 4: 'DEBUG'}


class ErrorConditionObject(object):
    """One error condition (eco) raised somewhere on the grid.

    @param type  #BUG,INPUT,MONITORING,OPERATIONS,PERFORMANCE,UNKNOWN
    @param level #1:critical, 2:warning, 3:info, 4:debug
    """

    def __init__(self, ddict=None, msg="", msgpub="", category="", level=1, type="UNKNOWN", tb=None):
        # BUGFIX: default was the mutable literal {} (shared across calls);
        # None with a truthiness check preserves caller-visible behavior.
        if ddict:
            # Rehydrate an existing eco from its serialized dict form.
            self.__dict__ = ddict
        else:
            self.backtrace = ""
            self.backtraceDetailed = ""
            btkis, filename0, linenr0, func0 = j.errorconditionhandler.getErrorTraceKIS(tb=tb)
            if len(btkis) > 1:
                self.backtrace = self.getBacktrace(btkis, filename0, linenr0, func0)
            self.guid = j.base.idgenerator.generateGUID()  # default id for the case where there is no redis
            self.category = category  # category in dot notation
            self.errormessage = msg
            self.errormessagePub = msgpub
            self.level = int(level)  # 1:critical, 2:warning, 3:info see j.enumerators.ErrorConditionLevel.
            if len(btkis) > 1:
                self.code = btkis[-1][0]
                self.funcname = func0
                self.funcfilename = filename0
                self.funclinenr = linenr0
            else:
                self.code = ""
                self.funcname = ""
                self.funcfilename = ""
                self.funclinenr = ""
            self.appname = j.application.appname  # name as used by application
            self.gid = j.application.whoAmI.gid
            self.nid = j.application.whoAmI.nid
            if hasattr(j, 'core') and hasattr(j.core, 'grid') and hasattr(j.core.grid, 'aid'):
                self.aid = j.core.grid.aid
            self.pid = j.application.whoAmI.pid
            self.jid = 0
            self.masterjid = 0
            self.epoch = j.base.time.getTimeEpoch()
            self.type = str(type)  # BUG,INPUT,MONITORING,OPERATIONS,PERFORMANCE,UNKNOWN
            self.tb = tb
            self.tags = ""  # e.g. machine:2323
            self.state = "NEW"  # one of ["NEW","ALERT","CLOSED"]
            self.lasttime = 0  # last time there was an error condition linked to this alert
            self.closetime = 0  # alert is closed, no longer active
            self.occurrences = 1  # nr of times this error condition happened
            self.uniquekey = ""

    def getUniqueKey(self):
        """
        return unique key for object, is used to define unique id
        """
        if self.category != "":
            C = "%s_%s_%s_%s_%s_%s_%s_%s" % (self.gid, self.nid, self.category, self.level,
                                             self.funcname, self.funcfilename, self.appname, self.type)
        else:
            C = "%s_%s_%s_%s_%s_%s_%s_%s" % (self.gid, self.nid, self.errormessage, self.level,
                                             self.funcname, self.funcfilename, self.appname, self.type)
        self.uniquekey = j.tools.hash.md5_string(C)
        return self.uniquekey

    def toAscii(self):
        """Force the message fields to plain ascii (redis/json transport safety)."""
        def _toAscii(s):
            try:
                return unicodedata.normalize('NFKD', unicode(s)).encode('ascii', 'ignore')
            except Exception:
                # BUGFIX: previous code printed "BUG" and dropped into ipdb here
                # (hanging unattended services) and then returned None.
                # Fall back to a best-effort str() instead.
                try:
                    return str(s)
                except Exception:
                    return ""
        self.errormessage = _toAscii(self.errormessage)
        # BUGFIX: errormessagePub was converted twice; once is enough.
        self.errormessagePub = _toAscii(self.errormessagePub)
        self.backtraceDetailed = _toAscii(self.backtraceDetailed)

    def process(self):
        """Normalize fields, then hand the eco to the central handler (redis)."""
        self.toAscii()
        if self.type in ["INPUT", "MONITORING", "OPERATIONS", "PERFORMANCE"] and j.application.debug == False:
            # Non-bug categories don't need (potentially large) trace data in production.
            self.tb = ""
            self.code = ""
            self.backtrace = ""
            self.backtraceDetailed = ""

        if not j.basetype.integer.check(self.level):
            try:
                self.level = int(self.level)
            except Exception:
                pass
            # BUGFIX: was 'param.level' — 'param' does not exist (NameError).
            if not j.basetype.integer.check(self.level):
                self.level = 1
                j.events.inputerror_warning(
                    "Errorcondition was thrown with wrong level, needs to be int.\n%s" % str(self),
                    "eco.check.level")
        if self.level > 4:
            j.events.inputerror_warning(
                "Errorcondition was thrown with wrong level, needs to be max 4.\n%s" % str(self),
                "eco.check.level")
            self.level = 4

        res = j.errorconditionhandler._send2Redis(self)
        if res is not None:
            self.__dict__ = res

    def toJson(self):
        """Serialize to json; the traceback object itself is not serializable."""
        data = self.__dict__.copy()
        data.pop('tb', None)
        return json.dumps(data)

    def __str__(self):
        content = "\n\n***ERROR***\n"
        if self.backtrace != "":
            content = "%s\n" % self.backtrace
        content += "type/level: %s/%s\n" % (self.type, self.level)
        content += "%s\n" % self.errormessage
        if self.errormessagePub != "":
            content += "errorpub: %s\n" % self.errormessagePub
        return content

    __repr__ = __str__

    def log2filesystem(self):
        """
        write errorcondition to filesystem; returns the path written
        """
        j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.logDir, "errors", j.application.appname))
        path = j.system.fs.joinPaths(j.dirs.logDir, "errors", j.application.appname,
                                     "backtrace_%s.log" % (j.base.time.getLocalTimeHRForFilesystem()))
        msg = "***ERROR BACKTRACE***\n"
        msg += "%s\n" % self.backtrace
        msg += "***ERROR MESSAGE***\n"
        msg += "%s\n" % self.errormessage
        if self.errormessagePub != "":
            msg += "%s\n" % self.errormessagePub
        if len(j.logger.logs) > 0:
            msg += "\n***LOG MESSAGES***\n"
            for log in j.logger.logs:
                msg += "%s\n" % log
        msg += "***END***\n"
        j.system.fs.writeFile(path, msg)
        return path

    def getBacktrace(self, btkis=None, filename0=None, linenr0=None, func0=None):
        """Format a short, readable backtrace from getErrorTraceKIS tuples.

        Also stores the result on self.backtraceDetailed (existing behavior).
        """
        if btkis is None:
            btkis, filename0, linenr0, func0 = j.errorconditionhandler.getErrorTraceKIS()
        out = ""
        for filename, func, linenr, code, linenrOverall in btkis:
            out += " File \"%s\" Line %s, in %s\n" % (filename, linenrOverall, func)
            for c, line in enumerate(code.split("\n")):
                if c == linenr:
                    if len(line) > 120:
                        line = line[0:120]  # keep the report readable
                    out += " %s\n" % line.strip()
        self.backtraceDetailed = out
        return out

    def _filterLocals(self, k, v):
        """Return True when local var (k, v) is worth logging (drops shell/framework noise)."""
        try:
            k = "%s" % k
            v = "%s" % v
            if k in ["re", "q", "jumpscale", "pprint", "qexec", "jshell", "Shell",
                     "__doc__", "__file__", "__name__", "__package__", "i", "main", "page"]:
                return False
            if v.find("<module") != -1:
                return False
            if v.find("IPython") != -1:
                return False
            if v.find("<built-in function") != -1:
                return False
            if v.find("jumpscale.Shell") != -1:
                return False
        except Exception:
            return False
        return True

    def getBacktraceDetailed(self, tracebackObject=""):
        """
        Get stackframe log
        is a very detailed log with filepaths, code locations & local vars;
        output is capped at ~100 lines because it can become quite big.
        """
        import inspect
        if j.application.skipTraceback:
            return ""
        sep = "\n" + "-" * 90 + "\n"
        result = ''
        if not tracebackObject:
            return ""  # @todo needs to be fixed so it does work
        if tracebackObject is None:
            tracebackObject = inspect.currentframe()  # @todo does not work
        frames = inspect.getinnerframes(tracebackObject, 16)
        nrlines = 0
        for (frame, filename, lineno, fun, context, idx) in frames:
            nrlines += 1
            if nrlines > 100:
                return result
            location = filename + "(line %d) (function %s)\n" % (lineno, fun)
            if location.find("EventHandler.py") == -1:
                result += " " + sep
                result += " " + location
                result += " " + "========== STACKFRAME==========\n"
                if context:
                    for l, line in enumerate(context):
                        prefix = "--> " if l == idx else " "
                        result += prefix + line
                        nrlines += 1
                        if nrlines > 100:
                            return result
                result += " " + "============ LOCALS============\n"
                for (k, v) in sorted(frame.f_locals.items()):
                    if self._filterLocals(k, v):
                        try:
                            result += " %s : %s\n" % (str(k), str(v))
                        except Exception:
                            pass
                        nrlines += 1
                        if nrlines > 100:
                            return result
        # NOTE: existing behavior — result is stored, not returned, on success.
        self.backtrace = result

    def getCategory(self):
        return "eco"

    def getObjectType(self):
        return 3

    def getVersion(self):
        return 1

    def getMessage(self):
        # [$objecttype,$objectversion,guid,$object=data]
        return [3, 1, self.guid, self.__dict__]

    def getContentKey(self):
        """
        return unique key for object content, is used to define unique id
        (volatile id fields are excluded from the hash)
        """
        dd = copy.copy(self.__dict__)
        for key in ("_ckey", "id", "guid", "sguid"):
            dd.pop(key, None)
        return j.base.byteprocessor.hashMd5(str(dd))
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from pycadf import cadftaxonomy as taxonomy
from six.moves.urllib import parse

from keystone.auth import plugins as auth_plugins
from keystone.auth.plugins import base
from keystone.common import dependency
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications

METHOD_NAME = 'mapped'


@dependency.requires('federation_api', 'identity_api', 'resource_api',
                     'token_provider_api')
class Mapped(base.AuthMethodHandler):
    """Federation auth method: maps IdP assertions (or federation tokens)
    onto keystone identities via the mapping engine."""

    def _get_token_ref(self, auth_payload):
        # Validate the incoming federation token and wrap it in the model type.
        token_id = auth_payload['id']
        response = self.token_provider_api.validate_token(token_id)
        return token_model.KeystoneToken(token_id=token_id,
                                         token_data=response)

    def authenticate(self, request, auth_payload, auth_context):
        """Authenticate mapped user and set an authentication context.

        :param request: keystone's request context
        :param auth_payload: the content of the authentication for a
                             given method
        :param auth_context: user authentication context, a dictionary
                             shared by all plugins.

        In addition to ``user_id`` in ``auth_context``, this plugin sets
        ``group_ids``, ``OS-FEDERATION:identity_provider`` and
        ``OS-FEDERATION:protocol``

        """
        # Presence of 'id' means the caller already holds an (unscoped)
        # federation token; otherwise a raw assertion comes in via the env.
        if 'id' in auth_payload:
            token_ref = self._get_token_ref(auth_payload)
            handle_scoped_token(request, auth_payload, auth_context, token_ref,
                                self.federation_api, self.identity_api,
                                self.token_provider_api)
        else:
            handle_unscoped_token(request, auth_payload, auth_context,
                                  self.resource_api, self.federation_api,
                                  self.identity_api)


def handle_scoped_token(request, auth_payload, auth_context, token_ref,
                        federation_api, identity_api, token_provider_api):
    """Re-authenticate from an existing federation token and fill auth_context.

    Emits a CADF audit notification for both the success and failure path.
    """
    utils.validate_expiration(token_ref)
    token_audit_id = token_ref.audit_id
    identity_provider = token_ref.federation_idp_id
    protocol = token_ref.federation_protocol_id
    user_id = token_ref.user_id
    group_ids = token_ref.federation_group_ids
    # Pre-bind everything but the outcome so both branches below stay simple.
    send_notification = functools.partial(
        notifications.send_saml_audit_notification, 'authenticate',
        request.context_dict, user_id, group_ids, identity_provider,
        protocol, token_audit_id)

    utils.assert_enabled_identity_provider(federation_api, identity_provider)

    try:
        mapping = federation_api.get_mapping_from_idp_and_protocol(
            identity_provider, protocol)
        utils.validate_groups(group_ids, mapping['id'], identity_api)

    except Exception:
        # NOTE(topol): Diaper defense to catch any exception, so we can
        # send off failed authentication notification, raise the exception
        # after sending the notification
        send_notification(taxonomy.OUTCOME_FAILURE)
        raise
    else:
        send_notification(taxonomy.OUTCOME_SUCCESS)

    auth_context['user_id'] = user_id
    auth_context['group_ids'] = group_ids
    auth_context[federation_constants.IDENTITY_PROVIDER] = identity_provider
    auth_context[federation_constants.PROTOCOL] = protocol


def handle_unscoped_token(request, auth_payload, auth_context,
                          resource_api, federation_api, identity_api):
    """Authenticate from a raw IdP assertion (no pre-existing token).

    Runs the mapping engine over the assertion, builds either an ephemeral
    (shadowed) or local user context, and emits a CADF audit notification
    on both the success and failure path.
    """

    def is_ephemeral_user(mapped_properties):
        return mapped_properties['user']['type'] == utils.UserType.EPHEMERAL

    def build_ephemeral_user_context(auth_context, user, mapped_properties,
                                     identity_provider, protocol):
        auth_context['user_id'] = user['id']
        auth_context['group_ids'] = mapped_properties['group_ids']
        auth_context[federation_constants.IDENTITY_PROVIDER] = (
            identity_provider)
        auth_context[federation_constants.PROTOCOL] = protocol

    def build_local_user_context(auth_context, mapped_properties):
        user_info = auth_plugins.UserAuthInfo.create(mapped_properties,
                                                     METHOD_NAME)
        auth_context['user_id'] = user_info.user_id

    assertion = extract_assertion_data(request)
    identity_provider = auth_payload['identity_provider']
    protocol = auth_payload['protocol']

    utils.assert_enabled_identity_provider(federation_api, identity_provider)

    group_ids = None
    # NOTE(topol): The user is coming in from an IdP with a SAML assertion
    # instead of from a token, so we set token_id to None
    token_id = None
    # NOTE(marek-denis): This variable is set to None and there is a
    # possibility that it will be used in the CADF notification. This means
    # operation will not be mapped to any user (even ephemeral).
    user_id = None

    try:
        try:
            mapped_properties, mapping_id = apply_mapping_filter(
                identity_provider, protocol, assertion, resource_api,
                federation_api, identity_api)
        except exception.ValidationError as e:
            # if mapping is either invalid or yield no valid identity,
            # it is considered a failed authentication
            raise exception.Unauthorized(e)

        if is_ephemeral_user(mapped_properties):
            unique_id, display_name = (
                get_user_unique_id_and_display_name(request, mapped_properties)
            )
            user = identity_api.shadow_federated_user(identity_provider,
                                                      protocol, unique_id,
                                                      display_name)
            user_id = user['id']
            group_ids = mapped_properties['group_ids']
            utils.validate_groups_cardinality(group_ids, mapping_id)
            build_ephemeral_user_context(auth_context, user,
                                         mapped_properties,
                                         identity_provider, protocol)
        else:
            build_local_user_context(auth_context, mapped_properties)

    except Exception:
        # NOTE(topol): Diaper defense to catch any exception, so we can
        # send off failed authentication notification, raise the exception
        # after sending the notification
        outcome = taxonomy.OUTCOME_FAILURE
        notifications.send_saml_audit_notification('authenticate',
                                                   request.context_dict,
                                                   user_id, group_ids,
                                                   identity_provider,
                                                   protocol, token_id,
                                                   outcome)
        raise
    else:
        outcome = taxonomy.OUTCOME_SUCCESS
        notifications.send_saml_audit_notification('authenticate',
                                                   request.context_dict,
                                                   user_id, group_ids,
                                                   identity_provider,
                                                   protocol, token_id,
                                                   outcome)


def extract_assertion_data(request):
    """Collect the IdP assertion attributes from the request environment."""
    assertion = dict(utils.get_assertion_params_from_env(request))
    return assertion


def apply_mapping_filter(identity_provider, protocol, assertion,
                         resource_api, federation_api, identity_api):
    """Run the mapping engine over an assertion; returns (mapped_properties,
    mapping_id) with group names already translated to group ids."""
    idp = federation_api.get_idp(identity_provider)
    utils.validate_idp(idp, protocol, assertion)

    mapped_properties, mapping_id = federation_api.evaluate(
        identity_provider, protocol, assertion)

    # NOTE(marek-denis): We update group_ids only here to avoid fetching
    # groups identified by name/domain twice.
    # NOTE(marek-denis): Groups are translated from name/domain to their
    # corresponding ids in the auth plugin, as we need information what
    # ``mapping_id`` was used as well as identity_api and resource_api
    # objects.
    group_ids = mapped_properties['group_ids']
    utils.validate_groups_in_backend(group_ids,
                                     mapping_id,
                                     identity_api)
    group_ids.extend(
        utils.transform_to_group_ids(
            mapped_properties['group_names'], mapping_id,
            identity_api, resource_api))
    # De-duplicate ids coming from both the id- and name-based rules.
    mapped_properties['group_ids'] = list(set(group_ids))
    return mapped_properties, mapping_id


def get_user_unique_id_and_display_name(request, mapped_properties):
    """Setup federated username.

    Function covers all the cases for properly setting user id, a primary
    identifier for identity objects. Initial version of the mapping engine
    assumed user is identified by ``name`` and his ``id`` is built from the
    name. We, however need to be able to accept local rules that identify user
    by either id or name/domain.

    The following use-cases are covered:
    1) If neither user_name nor user_id is set raise exception.Unauthorized
    2) If user_id is set and user_name not, set user_name equal to user_id
    3) If user_id is not set and user_name is, set user_id as url safe version
       of user_name.

    :param request: current request object
    :param mapped_properties: Properties issued by a RuleProcessor.
    :type: dictionary

    :raises keystone.exception.Unauthorized: If neither `user_name` nor
        `user_id` is set.
    :returns: tuple with user identification
    :rtype: tuple

    """
    user = mapped_properties['user']

    user_id = user.get('id')
    user_name = user.get('name') or request.remote_user

    if not any([user_id, user_name]):
        msg = _("Could not map user while setting ephemeral user identity. "
                "Either mapping rules must specify user id/name or "
                "REMOTE_USER environment variable must be set.")
        raise exception.Unauthorized(msg)

    elif not user_name:
        user['name'] = user_id

    elif not user_id:
        user_id = user_name

    # Quote so the id is safe to embed in URLs regardless of the IdP name.
    user['id'] = parse.quote(user_id)
    return (user['id'], user['name'])
""" .. module:: report.views. :synopsis: Handles the report URL endpoints for the OpenRecords application """ from datetime import datetime, timedelta from calendar import monthrange from io import BytesIO from flask import ( current_app, flash, render_template, jsonify, redirect, request, url_for, send_file ) from flask_login import current_user, login_required from app.constants import ( request_status ) from app.lib.date_utils import local_to_utc from app.models import ( Agencies, Requests, UserRequests ) from app.report import report from app.report.forms import ( AcknowledgmentForm, ReportFilterForm, MonthlyMetricsReportForm, OpenDataReportForm ) from app.report.utils import ( generate_acknowledgment_report, generate_monthly_metrics_report, generate_open_data_report ) @report.route('/show', methods=['GET']) def show_report(): """ This function handles the rendering of the reports page :return: redirect to reports page """ return render_template('report/reports.html', acknowledgment_form=AcknowledgmentForm(), monthly_report_form=MonthlyMetricsReportForm(), report_filter_form=ReportFilterForm(), open_data_report_form=OpenDataReportForm()) @report.route('/', methods=['GET']) def get(): """ This function handles the retrieval of report data to generate the chart on the frontend. Takes in agency_ein or user_guid from the frontend and filters for the number of requests closed and requests opened. 
:return: json object({"labels": ["Opened", "Closed"], "values": [150, 135], "active_users": [('', ''), ('o8pj0k', 'John Doe')]}), 200 """ agency_ein = request.args.get('agency_ein') user_guid = request.args.get('user_guid', '') requests_opened = 0 requests_closed = 0 active_users = [] is_visible = False results = False if agency_ein and user_guid == '': if agency_ein == 'all': active_requests = Requests.query.with_entities(Requests.status).join( Agencies, Requests.agency_ein == Agencies.ein).filter( Agencies.is_active).all() requests_closed = len([r for r in active_requests if r[0] == request_status.CLOSED]) requests_opened = len(active_requests) - requests_closed else: active_requests = Requests.query.with_entities(Requests.status).join( Agencies, Requests.agency_ein == Agencies.ein).filter( Agencies.ein == agency_ein, Agencies.is_active).all() requests_closed = len([r for r in active_requests if r[0] == request_status.CLOSED]) requests_opened = len(active_requests) - requests_closed if not (current_user.is_anonymous or current_user.is_public): if (current_user.is_agency and current_user.is_agency_admin(agency_ein)) or current_user.is_super: is_visible = True if current_user.is_agency_admin(agency_ein) or current_user.is_super: active_users = sorted( [(user.guid, user.name) for user in Agencies.query.filter_by(ein=agency_ein).one().active_users], key=lambda x: x[1]) elif current_user.is_agency_active(agency_ein): active_users = [(current_user.guid, current_user.name)] if active_users: active_users.insert(0, ('', '')) results = True elif user_guid and (current_user.is_agency_active(agency_ein) or current_user.is_agency_admin(agency_ein) or current_user.is_super): is_visible = True ureqs = UserRequests.query.filter(UserRequests.user_guid == user_guid ).all() requests_closed = len([u for u in ureqs if u.request.status == request_status.CLOSED]) requests_opened = len([u for u in ureqs if u.request.status != request_status.CLOSED]) return jsonify({"labels": ["Open", 
"Closed"], "values": [requests_opened, requests_closed], "active_users": active_users, "is_visible": is_visible, "results": results }), 200 @report.route('/acknowledgment', methods=['POST']) @login_required def acknowledgment(): """Generates the acknowledgment report. Returns: Template with context. """ acknowledgment_form = AcknowledgmentForm() if acknowledgment_form.validate_on_submit(): # Only agency administrators can access endpoint if not current_user.is_agency_admin: return jsonify({ 'error': 'Only Agency Administrators can access this endpoint.' }), 403 date_from = local_to_utc(datetime.strptime(request.form['date_from'], '%m/%d/%Y'), current_app.config['APP_TIMEZONE']) date_to = local_to_utc(datetime.strptime(request.form['date_to'], '%m/%d/%Y'), current_app.config['APP_TIMEZONE']) redis_key = '{current_user_guid}-{report_type}-{agency_ein}-{timestamp}'.format( current_user_guid=current_user.guid, report_type='acknowledgment', agency_ein=current_user.default_agency_ein, timestamp=datetime.now(), ) generate_acknowledgment_report.apply_async(args=[current_user.guid, date_from, date_to], serializer='pickle', task_id=redis_key) flash('Your report is being generated. You will receive an email with the report attached once its complete.', category='success') else: for field, _ in acknowledgment_form.errors.items(): flash(acknowledgment_form.errors[field][0], category='danger') return redirect(url_for("report.show_report")) @report.route('/monthly-metrics-report', methods=['POST']) @login_required def monthly_metrics_report(): """Generates the monthly metrics report. Returns: Template with context. """ monthly_report_form = MonthlyMetricsReportForm() if monthly_report_form.validate_on_submit(): # Only agency administrators can access endpoint if not current_user.is_agency_admin: return jsonify({ 'error': 'Only Agency Administrators can access this endpoint.' 
}), 403 # Date conversions date_from = request.form['year'] + '-' + request.form['month'] + '-' + '01' end_of_month = monthrange(int(request.form['year']), int(request.form['month']))[1] date_to = request.form['year'] + '-' + request.form['month'] + '-' + str(end_of_month) redis_key = '{current_user_guid}-{report_type}-{agency_ein}-{timestamp}'.format( current_user_guid=current_user.guid, report_type='metrics', agency_ein=current_user.default_agency_ein, timestamp=datetime.now() ) generate_monthly_metrics_report.apply_async(args=[current_user.default_agency_ein, date_from, date_to, [current_user.email]], serializer='pickle', task_id=redis_key) flash('Your report is being generated. You will receive an email with the report attached once its complete.', category='success') else: for field, _ in monthly_report_form.errors.items(): flash(monthly_report_form.errors[field][0], category='danger') return redirect(url_for("report.show_report")) @report.route('/open-data-report', methods=['POST']) @login_required def open_data_report(): """Generates the Open Data Compliance report. Returns: Template with context. """ open_data_report_form = OpenDataReportForm() if open_data_report_form.validate_on_submit(): # Only agency administrators can access endpoint if not current_user.is_agency_admin: return jsonify({ 'error': 'Only Agency Administrators can access this endpoint.' 
}), 403 date_from = local_to_utc(datetime.strptime(request.form['date_from'], '%m/%d/%Y'), current_app.config['APP_TIMEZONE']) date_to = local_to_utc(datetime.strptime(request.form['date_to'], '%m/%d/%Y'), current_app.config['APP_TIMEZONE']) + timedelta(days=1) open_data_report_spreadsheet = generate_open_data_report(current_user.default_agency_ein, date_from, date_to) date_from_string = date_from.strftime('%Y%m%d') date_to_string = date_to.strftime('%Y%m%d') return send_file( BytesIO(open_data_report_spreadsheet), attachment_filename='open_data_compliance_report_{}_{}.xls'.format(date_from_string, date_to_string), as_attachment=True ) else: for field, _ in open_data_report_form.errors.items(): flash(open_data_report_form.errors[field][0], category='danger') return redirect(url_for('report.show_report'))
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for the volume/snapshot admin actions of the Cinder v2 API.

Covers the 'os-reset_status', 'os-force_delete', 'os-force_detach',
'os-migrate_volume' and 'os-migrate_volume_completion' actions, driven
through a real WSGI router (see app()) against the test database.
"""

import ast
import tempfile

import webob
from oslo.config import cfg

from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common import timeutils
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.tests import cast_as_call
from cinder.volume import api as volume_api

CONF = cfg.CONF


def app():
    # no auth, just let environ['cinder.context'] pass through
    api = fakes.router.APIRouter()
    mapper = fakes.urlmap.URLMap()
    mapper['/v2'] = api
    return mapper


class AdminActionsTest(test.TestCase):
    """Exercise the admin-only volume/snapshot actions end to end.

    Each test builds a raw webob request against the router returned by
    app(), injecting the desired RequestContext directly into the WSGI
    environ (no auth middleware is in the pipeline).
    """

    def setUp(self):
        super(AdminActionsTest, self).setUp()
        self.tempdir = tempfile.mkdtemp()
        # Use the in-process fake RPC backend so casts run synchronously.
        self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake')
        self.flags(lock_path=self.tempdir,
                   disable_process_locking=True)
        self.volume_api = volume_api.API()
        # Turn async casts into calls so test assertions see the result.
        cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client)
        cast_as_call.mock_cast_as_call(self.volume_api.scheduler_rpcapi.client)
        self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)

    def test_reset_status_as_admin(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available'})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # request status of 'error'
        req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 202)
        volume = db.volume_get(ctx, volume['id'])
        # status changed to 'error'
        self.assertEqual(volume['status'], 'error')

    def test_reset_status_as_non_admin(self):
        # current status is 'error'
        volume = db.volume_create(context.get_admin_context(),
                                  {'status': 'error', 'size': 1})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # request changing status to available
        req.body = jsonutils.dumps({'os-reset_status': {'status':
                                                        'available'}})
        # non-admin context
        req.environ['cinder.context'] = context.RequestContext('fake', 'fake')
        resp = req.get_response(app())
        # request is not authorized
        self.assertEqual(resp.status_int, 403)
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        # status is still 'error'
        self.assertEqual(volume['status'], 'error')

    def test_malformed_reset_status_body(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # malformed request body ('x-status' instead of 'status')
        req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # bad request
        self.assertEqual(resp.status_int, 400)
        volume = db.volume_get(ctx, volume['id'])
        # status is still 'available'
        self.assertEqual(volume['status'], 'available')

    def test_invalid_status_for_volume(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # 'invalid' is not a valid status
        req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # bad request
        self.assertEqual(resp.status_int, 400)
        volume = db.volume_get(ctx, volume['id'])
        # status is still 'available'
        self.assertEqual(volume['status'], 'available')

    def test_reset_status_for_missing_volume(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # missing-volume-id
        req = webob.Request.blank('/v2/fake/volumes/%s/action' %
                                  'missing-volume-id')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # malformed request body
        req.body = jsonutils.dumps({'os-reset_status': {'status':
                                                        'available'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # not found
        self.assertEqual(resp.status_int, 404)
        self.assertRaises(exception.NotFound, db.volume_get, ctx,
                          'missing-volume-id')

    def test_reset_attached_status(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1,
                                        'attach_status': 'attached'})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # request update attach_status to detached
        body = {'os-reset_status': {'status': 'available',
                                    'attach_status': 'detached'}}
        req.body = jsonutils.dumps(body)
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 202)
        volume = db.volume_get(ctx, volume['id'])
        # attach_status changed to 'detached'
        self.assertEqual(volume['attach_status'], 'detached')
        # status un-modified
        self.assertEqual(volume['status'], 'available')

    def test_invalid_reset_attached_status(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1,
                                        'attach_status': 'detached'})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # 'invalid' is not a valid attach_status
        body = {'os-reset_status': {'status': 'available',
                                    'attach_status': 'invalid'}}
        req.body = jsonutils.dumps(body)
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # bad request
        self.assertEqual(resp.status_int, 400)
        volume = db.volume_get(ctx, volume['id'])
        # status and attach_status un-modified
        self.assertEqual(volume['status'], 'available')
        self.assertEqual(volume['attach_status'], 'detached')

    def test_snapshot_reset_status(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # snapshot in 'error_deleting'
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
                                            'volume_id': volume['id']})
        req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
                                  snapshot['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # request status of 'error'
        req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 202)
        snapshot = db.snapshot_get(ctx, snapshot['id'])
        # status changed to 'error'
        self.assertEqual(snapshot['status'], 'error')

    def test_invalid_status_for_snapshot(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # snapshot in 'available'
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        snapshot = db.snapshot_create(ctx, {'status': 'available',
                                            'volume_id': volume['id']})
        req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
                                  snapshot['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # 'attaching' is not a valid status for snapshots
        req.body = jsonutils.dumps({'os-reset_status': {'status':
                                                        'attaching'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 400)
        snapshot = db.snapshot_get(ctx, snapshot['id'])
        # status is still 'available'
        self.assertEqual(snapshot['status'], 'available')

    def test_force_delete(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is creating
        volume = db.volume_create(ctx, {'size': 1})
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps({'os-force_delete': {}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 202)
        # volume is deleted
        self.assertRaises(exception.NotFound, db.volume_get, ctx,
                          volume['id'])

    def test_force_delete_snapshot(self):
        ctx = context.RequestContext('admin', 'fake', True)
        snapshot = stubs.stub_snapshot(1, host='foo')
        # stub out the db/api layer so no real snapshot is needed
        self.stubs.Set(db, 'volume_get', lambda x, y: snapshot)
        self.stubs.Set(db, 'snapshot_get', lambda x, y: snapshot)
        self.stubs.Set(volume_api.API, 'delete_snapshot',
                       lambda *x, **y: True)
        path = '/v2/fake/snapshots/%s/action' % snapshot['id']
        req = webob.Request.blank(path)
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps({'os-force_delete': {}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        self.assertEqual(resp.status_int, 202)

    def test_force_detach_instance_attached_volume(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        self.volume_api.reserve_volume(ctx, volume)
        mountpoint = '/dev/vbd'
        self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
                               mountpoint, 'rw')
        # volume is attached
        volume = db.volume_get(ctx, volume['id'])
        self.assertEqual(volume['status'], 'in-use')
        self.assertEqual(volume['instance_uuid'], stubs.FAKE_UUID)
        self.assertIsNone(volume['attached_host'])
        self.assertEqual(volume['mountpoint'], mountpoint)
        self.assertEqual(volume['attach_status'], 'attached')
        admin_metadata = volume['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'False')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'rw')
        conn_info = self.volume_api.initialize_connection(ctx,
                                                          volume,
                                                          connector)
        self.assertEqual(conn_info['data']['access_mode'], 'rw')
        # build request to force detach
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # request status of 'error'
        req.body = jsonutils.dumps({'os-force_detach': None})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        # make request
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 202)
        volume = db.volume_get(ctx, volume['id'])
        # status changed to 'available'
        self.assertEqual(volume['status'], 'available')
        self.assertIsNone(volume['instance_uuid'])
        self.assertIsNone(volume['attached_host'])
        self.assertIsNone(volume['mountpoint'])
        self.assertEqual(volume['attach_status'], 'detached')
        # detach drops the 'attached_mode' entry, 'readonly' remains
        admin_metadata = volume['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 1)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'False')
        # cleanup
        svc.stop()

    def test_force_detach_host_attached_volume(self):
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        self.volume_api.reserve_volume(ctx, volume)
        mountpoint = '/dev/vbd'
        host_name = 'fake-host'
        self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'ro')
        # volume is attached
        volume = db.volume_get(ctx, volume['id'])
        self.assertEqual(volume['status'], 'in-use')
        self.assertIsNone(volume['instance_uuid'])
        self.assertEqual(volume['attached_host'], host_name)
        self.assertEqual(volume['mountpoint'], mountpoint)
        self.assertEqual(volume['attach_status'], 'attached')
        admin_metadata = volume['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'False')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'ro')
        conn_info = self.volume_api.initialize_connection(ctx,
                                                          volume,
                                                          connector)
        self.assertEqual(conn_info['data']['access_mode'], 'ro')
        # build request to force detach
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # request status of 'error'
        req.body = jsonutils.dumps({'os-force_detach': None})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        # make request
        resp = req.get_response(app())
        # request is accepted
        self.assertEqual(resp.status_int, 202)
        volume = db.volume_get(ctx, volume['id'])
        # status changed to 'available'
        self.assertEqual(volume['status'], 'available')
        self.assertIsNone(volume['instance_uuid'])
        self.assertIsNone(volume['attached_host'])
        self.assertIsNone(volume['mountpoint'])
        self.assertEqual(volume['attach_status'], 'detached')
        admin_metadata = volume['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 1)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'False')
        # cleanup
        svc.stop()

    def test_attach_in_used_volume_by_instance(self):
        """Test that attaching to an in-use volume fails."""
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        self.volume_api.reserve_volume(ctx, volume)
        mountpoint = '/dev/vbd'
        self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
                               mountpoint, 'rw')
        conn_info = self.volume_api.initialize_connection(ctx,
                                                          volume, connector)
        self.assertEqual(conn_info['data']['access_mode'], 'rw')
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attach,
                          ctx,
                          volume,
                          fakes.get_fake_uuid(),
                          None,
                          mountpoint,
                          'rw')
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attach,
                          ctx,
                          volume,
                          fakes.get_fake_uuid(),
                          None,
                          mountpoint,
                          'ro')
        # cleanup
        svc.stop()

    def test_attach_in_used_volume_by_host(self):
        """Test that attaching to an in-use volume fails."""
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        self.volume_api.reserve_volume(ctx, volume)
        mountpoint = '/dev/vbd'
        host_name = 'fake_host'
        self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'rw')
        conn_info = self.volume_api.initialize_connection(ctx,
                                                          volume, connector)
        # NOTE(review): this ASSIGNS access_mode instead of asserting it,
        # unlike the sibling tests above -- possibly an accidental '='.
        conn_info['data']['access_mode'] = 'rw'
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attach,
                          ctx,
                          volume,
                          None,
                          host_name,
                          mountpoint,
                          'rw')
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attach,
                          ctx,
                          volume,
                          None,
                          host_name,
                          mountpoint,
                          'ro')
        # cleanup
        svc.stop()

    def test_invalid_iscsi_connector(self):
        """Test connector without the initiator (required by iscsi driver)."""
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        connector = {}
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.volume_api.initialize_connection,
                          ctx, volume, connector)
        # cleanup
        svc.stop()

    def test_attach_attaching_volume_with_different_instance(self):
        """Test that attaching volume reserved for another instance fails."""
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        values = {'status': 'attaching',
                  'instance_uuid': fakes.get_fake_uuid()}
        db.volume_update(ctx, volume['id'], values)
        mountpoint = '/dev/vbd'
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attach,
                          ctx,
                          volume,
                          stubs.FAKE_UUID,
                          None,
                          mountpoint,
                          'rw')
        # cleanup
        svc.stop()

    def test_attach_attaching_volume_with_different_mode(self):
        """Test that attaching volume reserved for another mode fails."""
        # admin context
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available
        volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                        'provider_location': '', 'size': 1})
        # start service to handle rpc messages for attach requests
        svc = self.start_service('volume', host='test')
        values = {'status': 'attaching',
                  'instance_uuid': fakes.get_fake_uuid()}
        db.volume_update(ctx, volume['id'], values)
        db.volume_admin_metadata_update(ctx, volume['id'],
                                        {"attached_mode": 'rw'}, False)
        mountpoint = '/dev/vbd'
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attach,
                          ctx,
                          volume,
                          values['instance_uuid'],
                          None,
                          mountpoint,
                          'ro')
        # cleanup
        svc.stop()

    def _migrate_volume_prep(self):
        """Create source/destination hosts and an available volume.

        Returns the freshly created volume record (on host 'test').
        """
        admin_ctx = context.get_admin_context()
        # create volume's current host and the destination host
        db.service_create(admin_ctx,
                          {'host': 'test',
                           'topic': CONF.volume_topic,
                           'created_at': timeutils.utcnow()})
        db.service_create(admin_ctx,
                          {'host': 'test2',
                           'topic': CONF.volume_topic,
                           'created_at': timeutils.utcnow()})
        # current status is available
        volume = db.volume_create(admin_ctx,
                                  {'status': 'available',
                                   'host': 'test',
                                   'provider_location': '',
                                   'attach_status': ''})
        return volume

    def _migrate_volume_exec(self, ctx, volume, host, expected_status,
                             force_host_copy=False):
        """POST an os-migrate_volume action and assert the HTTP status.

        Returns the refreshed volume record after the request.
        """
        admin_ctx = context.get_admin_context()
        # build request to migrate to host
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        body = {'os-migrate_volume': {'host': host,
                                      'force_host_copy': force_host_copy}}
        req.body = jsonutils.dumps(body)
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # verify status
        self.assertEqual(resp.status_int, expected_status)
        volume = db.volume_get(admin_ctx, volume['id'])
        return volume

    def test_migrate_volume_success(self):
        expected_status = 202
        host = 'test2'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        volume = self._migrate_volume_exec(ctx, volume, host, expected_status)
        self.assertEqual(volume['migration_status'], 'starting')

    def test_migrate_volume_as_non_admin(self):
        expected_status = 403
        host = 'test2'
        ctx = context.RequestContext('fake', 'fake')
        volume = self._migrate_volume_prep()
        self._migrate_volume_exec(ctx, volume, host, expected_status)

    def test_migrate_volume_without_host_parameter(self):
        expected_status = 400
        host = 'test3'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        # build request to migrate without host
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        body = {'os-migrate_volume': {'host': host,
                                      'force_host_copy': False}}
        req.body = jsonutils.dumps(body)
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        # verify status
        self.assertEqual(resp.status_int, expected_status)

    def test_migrate_volume_host_no_exist(self):
        expected_status = 400
        host = 'test3'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        self._migrate_volume_exec(ctx, volume, host, expected_status)

    def test_migrate_volume_same_host(self):
        expected_status = 400
        host = 'test'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        self._migrate_volume_exec(ctx, volume, host, expected_status)

    def test_migrate_volume_migrating(self):
        expected_status = 400
        host = 'test2'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        model_update = {'migration_status': 'migrating'}
        volume = db.volume_update(ctx, volume['id'], model_update)
        self._migrate_volume_exec(ctx, volume, host, expected_status)

    def test_migrate_volume_with_snap(self):
        expected_status = 400
        host = 'test2'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        db.snapshot_create(ctx, {'volume_id': volume['id']})
        self._migrate_volume_exec(ctx, volume, host, expected_status)

    def test_migrate_volume_bad_force_host_copy1(self):
        expected_status = 400
        host = 'test2'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        self._migrate_volume_exec(ctx, volume, host, expected_status,
                                  force_host_copy='foo')

    def test_migrate_volume_bad_force_host_copy2(self):
        expected_status = 400
        host = 'test2'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_prep()
        self._migrate_volume_exec(ctx, volume, host, expected_status,
                                  force_host_copy=1)

    def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error,
                                  expected_status, expected_id,
                                  no_body=False):
        """POST an os-migrate_volume_completion action and verify the reply.

        When no_body is True the action key is sent empty to exercise the
        "no action" error path.  expected_id, when set, must come back in
        the response as 'save_volume_id'.
        """
        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        body = {'new_volume': new_volume['id'], 'error': error}
        if no_body:
            req.body = jsonutils.dumps({'': body})
        else:
            req.body = jsonutils.dumps({'os-migrate_volume_completion': body})
        req.environ['cinder.context'] = ctx
        resp = req.get_response(app())
        resp_dict = ast.literal_eval(resp.body)
        # verify status
        self.assertEqual(resp.status_int, expected_status)
        if expected_id:
            self.assertEqual(resp_dict['save_volume_id'], expected_id)
        else:
            self.assertNotIn('save_volume_id', resp_dict)

    def test_migrate_volume_comp_as_non_admin(self):
        admin_ctx = context.get_admin_context()
        volume = db.volume_create(admin_ctx, {'id': 'fake1'})
        new_volume = db.volume_create(admin_ctx, {'id': 'fake2'})
        expected_status = 403
        expected_id = None
        ctx = context.RequestContext('fake', 'fake')
        volume = self._migrate_volume_comp_exec(ctx, volume, new_volume,
                                                False, expected_status,
                                                expected_id)

    def test_migrate_volume_comp_no_mig_status(self):
        admin_ctx = context.get_admin_context()
        volume1 = db.volume_create(admin_ctx, {'id': 'fake1',
                                               'migration_status': 'foo'})
        volume2 = db.volume_create(admin_ctx, {'id': 'fake2',
                                               'migration_status': None})
        expected_status = 400
        expected_id = None
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_comp_exec(ctx, volume1, volume2, False,
                                                expected_status, expected_id)
        volume = self._migrate_volume_comp_exec(ctx, volume2, volume1, False,
                                                expected_status, expected_id)

    def test_migrate_volume_comp_bad_mig_status(self):
        admin_ctx = context.get_admin_context()
        volume1 = db.volume_create(admin_ctx,
                                   {'id': 'fake1',
                                    'migration_status': 'migrating'})
        volume2 = db.volume_create(admin_ctx,
                                   {'id': 'fake2',
                                    'migration_status': 'target:foo'})
        expected_status = 400
        expected_id = None
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_comp_exec(ctx, volume1, volume2, False,
                                                expected_status, expected_id)

    def test_migrate_volume_comp_no_action(self):
        admin_ctx = context.get_admin_context()
        volume = db.volume_create(admin_ctx, {'id': 'fake1'})
        new_volume = db.volume_create(admin_ctx, {'id': 'fake2'})
        expected_status = 400
        expected_id = None
        ctx = context.RequestContext('fake', 'fake')
        self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
                                       expected_status, expected_id, True)

    def test_migrate_volume_comp_from_nova(self):
        admin_ctx = context.get_admin_context()
        volume = db.volume_create(admin_ctx,
                                  {'id': 'fake1',
                                   'status': 'in-use',
                                   'host': 'test',
                                   'migration_status': None,
                                   'attach_status': 'attached'})
        new_volume = db.volume_create(admin_ctx,
                                      {'id': 'fake2',
                                       'status': 'available',
                                       'host': 'test',
                                       'migration_status': None,
                                       'attach_status': 'detached'})
        expected_status = 200
        expected_id = 'fake2'
        ctx = context.RequestContext('admin', 'fake', True)
        volume = self._migrate_volume_comp_exec(ctx, volume, new_volume,
                                                False, expected_status,
                                                expected_id)
#!/usr/bin/env python import pexpect import getopt import sys # Global Variables _user ="" _password = "" _client = "" _environment = "" _action = "" _patch = "" _actions = ['restart, shutdown, start, status, getListOfPatches, patchInstall, patchRollback, qaCheck, tuneConfig, audit'] #list of defined actions at this time _session ="" #list of regular expressions #variables for expect login_credentials = '[a-z]*[@][0-9]*[.][0-9]*[.][0-9]*[.][0-9]*[\'][s][ ]password' first_execution = '[a-z]*[@][a-zA-Z0-9]*[-][a-zA-Z0-9]*[-][a-zA-Z0-9]*[-][a-zA-Z0-9]*[\s][~]\]\$' reason_root = 'Reason: ' #root_password = '\[sudo\] password for '+_user+':' second_execution = '[a-z]*[@][a-zA-Z0-9]*[-][a-zA-Z0-9]*[-][a-zA-Z0-9]*[-][a-zA-Z0-9]*[\s][~]\]#' ### LIST OF CLIENTS THAT IM CURRENTLY USING ### IF YOU ARE ADDING A NEW CLIENT PLEASE USE ALL CAPITALIZED BECAUSE THAT IS THE WAY IM GETTING THEM ### ALSO IF YOU ARE USING STAGE USE STAGING - PRODUCTION AS PRD OR PROD - TEST FOR TST OR TS --- FULL NAMES FOR VARIABLES ### The naming is up to you, but whatever you type in the arguments will be capitalized entirely and then searched for any of the below variables CLIENT_PRODUCTION=['10.0.0.1','10.0.0.2'] # this is just an example def main(argv): try: opt, args = getopt.getopt(argv, "hu:p:c:e:a:z", ["help", "user=", "password=", "client=","environment=","action=","patch="]) except getopt.GetoptError, err: print str(err) usage() sys.exit(2) output = None verbose = False for o, a in opt: if o in ("-h", "--help"): usage() sys.exit() elif o in ("-u", "--user"): global _user, root_password _user = str(a) root_password = '\[sudo\] password for '+_user+':' elif o in ("-p", "--password"): global _password _password = str(a) elif o in ("-c", "--client"): global _client _client = str(a) elif o in ("-e", "--environment"): global _environment _environment = str(a) elif o in ("-a", "--action"): global _action _action = str(a) elif o in ("-z", "--patch"): global _patch _patch = str(a) else: assert False, 
"ERROR: Please refer to the help, -h or --help to get the list of available options" validate() #sys.exit() def usage(): usage = """ -h --help Prints this -u --user Username to log in into systems via SSH -p --password Password to log in into the systems via SSH -c --client Client that you want to run this: Client Name -e --environment Environment that you want to run: Production | Staging | Test -a --action (cmd) Command that you want to run at the moment in the servers: restart, shutdown, start, status, getListOfPatches, patchInstall, patchRollback, qaCheck, tuneCheck, audit -i --patch Patch that you want to rollback or remove (optional) - if the cmd is not related to patch this option is not needed Order: The order is important: user, password, client, environment, action, patch """ print usage def validate(): #debug """ print _user print _password print _client print _environment print _action print _patch """ global _action, _actions if _user == "": print "" print "ERROR: you need to enter an username. Please refer to the help." usage() sys.exit() elif _password == "": print "" print "ERROR: you need to enter a password. Please refer to the help." usage() sys.exit() elif _client == "": print "" print "ERROR: you need to define a client. Please refer to the help." usage() sys.exit() elif _environment == "": print "" print "ERROR: you need to define an environment . Please refer to the help." usage() sys.exit() elif _action in ("patchInstall", "patchRollback"): if _patch =="": print "" print "ERROR: when doing a patch install or rollback the patch id is required." 
sys.exit() if any(_action in s for s in _actions): run() def login(serverIP): session = pexpect.spawn('ssh '+_user+'@'+serverIP) global _session _session = session #log in files for debug #this is for debug to output everything to the screen session.logfile = sys.stdout #start exchanging information to log in and become root #check if new host to add it to known_hosts """ i = session.expect(['.* password:', '.* continue connecting (yes/no)?']) if i == 1: session.sendline('yes') else: """ session.expect(login_credentials) session.sendline(_password) session.expect(first_execution) #i logged in, now i want to become root session.sendline('root') session.expect(reason_root) session.sendline('check') session.expect(root_password) session.sendline(_password) session.expect(second_execution) return session def run(): global _client, _action _client = _client.upper()+"_"+_environment.upper() client = eval(str(_client)) for server in client: print "" print "==========" print "initializing connection to: " +server # creating the session session = login(server) #executing action print "---- start of execution -----" eval(str(_action))() print "---- end of execution -----" #closing connection #session.close(session) print "" print "finish connection to: "+ server print "===========" ### List of functions to the executed in the server def status(): global _session session = _session session.sendline('ServiceController.sh services.status') session.expect(second_execution) def getListOfPatches(): global _session session = _session session.sendline('/mnt/asp/utils/app/bbpatch/bbpatch.sh list') session.expect(second_execution) def restart(): global _session session = _session session.sendline('saferestart.sh') session.expect(second_execution) def shutdown(): global _session session = _session session.sendline('safestop.sh') session.expect(second_execution) def start(): global _session session = _session session.sendline('safestart.sh') session.expect(second_execution) # to build 
patchInstall, patchRollback """ def patchRollback(): global _session,_patch session = _session session.sendline('yes|/mnt/asp/utils/app/bbpatch/bbpatch.sh rollback '+ _patch ') session.expect(second_execution) """ def qaCheck(): global _session session = _session session.sendline('qacheck') session.expect(second_execution) def tuneCheck(): global _session session = _session session.sendline('tunecheck') session.expect(second_execution) def audit(): global _session session = _session session.sendline('audit.pl') session.expect(second_execution) if __name__ == "__main__": main(sys.argv[1:]) # [1:] slices off the first argument which is the name of the program
from functools import partial
from os.path import join, expanduser
from unittest.mock import MagicMock
import uuid

from genty import genty, genty_dataset, genty_args

from app.subcommands.deploy_subcommand import DeploySubcommand
from app.util.network import Network  # noqa -- kept for backward compatibility
from test.framework.base_unit_test_case import BaseUnitTestCase


@genty
class TestDeploySubcommand(BaseUnitTestCase):
    """Unit tests for DeploySubcommand's binary/conf deployment and slave
    registration validation logic."""

    def setUp(self):
        super().setUp()
        # Stub out tarball creation so no real filesystem work happens.
        self.patch('app.subcommands.deploy_subcommand.fs.compress_directory')

    def test_binaries_tar_raises_exception_if_running_from_source(self):
        deploy_subcommand = DeploySubcommand()
        with self.assertRaisesRegex(SystemExit, '1'):
            deploy_subcommand._binaries_tar('python main.py deploy', '~/.clusterrunner/dist')

    def test_binaries_doesnt_raise_exception_if_running_from_bin(self):
        self.patch('os.path.isfile').return_value = True
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._binaries_tar('clusterrunner', '~/.clusterrunner/dist')

    def test_deploy_binaries_and_conf_deploys_both_conf_and_binary_for_remote_host(self):
        mock_DeployTarget = self.patch('app.subcommands.deploy_subcommand.DeployTarget')
        mock_DeployTarget_instance = mock_DeployTarget.return_value
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._deploy_binaries_and_conf(
            'remote_host', 'username', 'exec', '/path/to/exec', '/path/to/conf')
        self.assertTrue(mock_DeployTarget_instance.deploy_binary.called)
        self.assertTrue(mock_DeployTarget_instance.deploy_conf.called)

    @genty_dataset(
        # expect to deploy the binary but not the conf when the current executable path is not the same
        # as the target executable path but the current conf path is the same as the target conf path
        same_conf_path_different_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner2'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner.conf'),
            expect_deploy_conf=False,
            expect_deploy_binary=True,
        ),
        # expect not to deploy the binary or the conf when the current executable path is the same
        # as the target executable path and the current conf path is the same as the target conf path
        same_conf_path_same_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner.conf'),
            expect_deploy_conf=False,
            expect_deploy_binary=False,
        ),
        # expect to deploy the conf but not the binary when the current conf path is not the same
        # as the target conf path but the current binary path is the same as the target binary path
        different_conf_path_same_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner2.conf'),
            expect_deploy_conf=True,
            expect_deploy_binary=False,
        ),
        # expect to deploy the binary and the conf when the current executable path is not the same
        # as the target executable path and the current conf path is not the same as the target conf path
        different_conf_path_different_exe_path=genty_args(
            current_executable=join(expanduser('~'), '.clusterrunner', 'dist', 'clusterrunner2'),
            in_use_conf_path=join(expanduser('~'), '.clusterrunner', 'clusterrunner2.conf'),
            expect_deploy_conf=True,
            expect_deploy_binary=True,
        ),
    )
    def test_deploy_binaries_and_conf_behaves_properly_if_conf_or_binary_is_in_use_on_localhost(
            self,
            current_executable,
            in_use_conf_path,
            expect_deploy_conf,
            expect_deploy_binary,
    ):
        mock_DeployTarget = self.patch('app.subcommands.deploy_subcommand.DeployTarget')
        mock_DeployTarget_instance = mock_DeployTarget.return_value
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._deploy_binaries_and_conf(
            'localhost',
            'username',
            current_executable,
            join(expanduser('~'), '.clusterrunner', 'clusterrunner.tgz'),
            in_use_conf_path
        )
        self.assertEqual(expect_deploy_binary, mock_DeployTarget_instance.deploy_binary.called)
        self.assertEqual(expect_deploy_conf, mock_DeployTarget_instance.deploy_conf.called)

    def test_non_registered_slaves_returns_empty_list_if_all_registered(self):
        registered_hosts = ['host_1', 'host_2']
        slaves_to_validate = ['host_1', 'host_2']

        def get_host_id(*args, **kwargs):
            if args[0] == 'host_1':
                return 'host_id_1'
            elif args[0] == 'host_2':
                return 'host_id_2'
            else:
                return 'blah'

        # Patch via the test framework (instead of manually swapping and
        # restoring Network.get_host_id) so the patch is undone even when the
        # assertion below fails; the manual swap leaked into later tests on
        # failure. This also matches the sibling tests' style.
        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(0, len(non_registered))

    def test_non_registered_slaves_returns_non_registered_slaves(self):
        registered_hosts = ['host_1', 'host_3']
        slaves_to_validate = ['host_1', 'host_2', 'host_3', 'host_4']

        def get_host_id(*args, **kwargs):
            if args[0] == 'host_1':
                return 'host_id_1'
            elif args[0] == 'host_2':
                return 'host_id_2'
            elif args[0] == 'host_3':
                return 'host_id_3'
            elif args[0] == 'host_4':
                return 'host_id_4'
            else:
                return 'blah'

        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
        self.assertEqual(len(non_registered), 2)
        self.assertTrue('host_2' in non_registered)
        self.assertTrue('host_4' in non_registered)

    def test_non_registered_slaves_returns_empty_list_with_slaves_with_same_host_ids_but_different_names(self):
        registered_hosts = ['host_1_alias', 'host_2_alias']
        slaves_to_validate = ['host_1', 'host_2']

        def get_host_id(*args, **kwargs):
            if args[0] == 'host_1':
                return 'host_id_1'
            elif args[0] == 'host_2':
                return 'host_id_2'
            elif args[0] == 'host_1_alias':
                return 'host_id_1'
            elif args[0] == 'host_2_alias':
                return 'host_id_2'
            else:
                return 'blah'

        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        non_registered = deploy_subcommand._non_registered_slaves(registered_hosts, slaves_to_validate)
        self.assertEqual(0, len(non_registered))

    @genty_dataset(
        valid_deployment=genty_args(
            slaves_to_validate=['slave_host_1', 'slave_host_2'],
            connected_slaves=['slave_host_1', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_1': 'host_1_id',
                'slave_host_2': 'host_2_id',
            },
            is_valid=True,
        ),
        host_mismatch=genty_args(
            slaves_to_validate=['slave_host_1', 'slave_host_2'],
            connected_slaves=['slave_host_3', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_2': 'host_2_id',
            },
            is_valid=False,
        ),
        number_of_slaves_not_match=genty_args(
            slaves_to_validate=['slave_host_1'],
            connected_slaves=['slave_host_1', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_1': 'host_1_id',
            },
            is_valid=False,
        ),
        valid_deployment_different_host_names_with_same_host_id=genty_args(
            slaves_to_validate=['slave_host_1', 'slave_host_2'],
            connected_slaves=['slave_host_1_alias', 'slave_host_2'],
            host_name_to_uid={
                'slave_host_1': 'host_1_id',
                'slave_host_1_alias': 'host_1_id',
                'slave_host_2': 'host_2_id',
            },
            is_valid=True,
        ),
    )
    def test_validate_deployment_checks_each_slave_is_connected(
            self,
            slaves_to_validate,
            connected_slaves,
            host_name_to_uid,
            is_valid,
    ):
        def get_host_id(host):
            # Unknown hosts get a random id so they never match anything.
            if host in host_name_to_uid:
                return host_name_to_uid[host]
            else:
                return str(uuid.uuid4())

        self.patch('app.util.network.Network.get_host_id', new=get_host_id)
        deploy_subcommand = DeploySubcommand()
        deploy_subcommand._registered_slave_hostnames = MagicMock(return_value=connected_slaves)
        deploy_subcommand._SLAVE_REGISTRY_TIMEOUT_SEC = 1
        deploy_subcommand._non_registered_slaves = MagicMock()
        validate = partial(deploy_subcommand._validate_successful_deployment, 'master_host_url', slaves_to_validate)
        if not is_valid:
            with self.assertRaises(SystemExit):
                validate()
        else:
            validate()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generates a syntax tree from a Mojo IDL file."""

import imp
import os.path
import sys


def _GetDirAbove(dirname):
  """Returns the directory "above" this file containing |dirname| (which must
  also be "above" this file)."""
  path = os.path.abspath(__file__)
  while True:
    path, tail = os.path.split(path)
    assert tail
    if tail == dirname:
      return path


# Make the "ply" package importable: if it is not already on sys.path, fall
# back to the checked-in third-party copy.
try:
  imp.find_module("ply")
except ImportError:
  sys.path.append(os.path.join(_GetDirAbove("public"), "public/third_party"))
from ply import lex
from ply import yacc

from ..error import Error
from . import ast
from .lexer import Lexer


# Upper bounds enforced during parsing; both fit in an unsigned 32-bit int.
_MAX_ORDINAL_VALUE = 0xffffffff
_MAX_ARRAY_SIZE = 0xffffffff


class ParseError(Error):
  """Class for errors from the parser."""

  def __init__(self, filename, message, lineno=None, snippet=None):
    Error.__init__(self, filename, message, lineno=lineno,
                   addenda=([snippet] if snippet else None))


# We have methods which look like they could be functions:
# pylint: disable=R0201
class Parser(object):
  # NOTE(review): ply derives the grammar from the docstrings of the |p_*|
  # methods below -- those docstrings are load-bearing and must not be
  # reworded or reformatted.

  def __init__(self, lexer, source, filename):
    self.tokens = lexer.tokens
    self.source = source
    self.filename = filename

  # Names of functions
  #
  # In general, we name functions after the left-hand-side of the rule(s) that
  # they handle. E.g., |p_foo_bar| for a rule |foo_bar : ...|.
  #
  # There may be multiple functions handling rules for the same left-hand-side;
  # then we name the functions |p_foo_bar_N| (for left-hand-side |foo_bar|),
  # where N is a number (numbered starting from 1). Note that using multiple
  # functions is actually more efficient than having single functions handle
  # multiple rules (and, e.g., distinguishing them by examining |len(p)|).
  #
  # It's also possible to have a function handling multiple rules with different
  # left-hand-sides. We do not do this.
  #
  # See http://www.dabeaz.com/ply/ply.html#ply_nn25 for more details.

  # TODO(vtl): Get rid of the braces in the module "statement". (Consider
  # renaming "module" -> "package".) Then we'll be able to have a single rule
  # for root (by making module "optional").
  def p_root_1(self, p):
    """root : """
    p[0] = ast.Mojom(None, ast.ImportList(), [])

  def p_root_2(self, p):
    """root : root module"""
    # Only one "module" statement is allowed, and it must come first.
    if p[1].module is not None:
      raise ParseError(self.filename,
                       "Multiple \"module\" statements not allowed:",
                       p[2].lineno, snippet=self._GetSnippet(p[2].lineno))
    if p[1].import_list.items or p[1].definition_list:
      raise ParseError(
          self.filename,
          "\"module\" statements must precede imports and definitions:",
          p[2].lineno, snippet=self._GetSnippet(p[2].lineno))
    p[0] = p[1]
    p[0].module = p[2]

  def p_root_3(self, p):
    """root : root import"""
    # Imports must precede all definitions.
    if p[1].definition_list:
      raise ParseError(self.filename,
                       "\"import\" statements must precede definitions:",
                       p[2].lineno, snippet=self._GetSnippet(p[2].lineno))
    p[0] = p[1]
    p[0].import_list.Append(p[2])

  def p_root_4(self, p):
    """root : root definition"""
    p[0] = p[1]
    p[0].definition_list.append(p[2])

  def p_import(self, p):
    """import : IMPORT STRING_LITERAL SEMI"""
    # 'eval' the literal to strip the quotes.
    # TODO(vtl): This eval is dubious. We should unquote/unescape ourselves.
    p[0] = ast.Import(eval(p[2]), filename=self.filename, lineno=p.lineno(2))

  def p_module(self, p):
    """module : attribute_section MODULE identifier_wrapped SEMI"""
    p[0] = ast.Module(p[3], p[1], filename=self.filename, lineno=p.lineno(2))

  def p_definition(self, p):
    """definition : struct
                  | union
                  | interface
                  | enum
                  | const"""
    p[0] = p[1]

  def p_attribute_section_1(self, p):
    """attribute_section : """
    p[0] = None

  def p_attribute_section_2(self, p):
    """attribute_section : LBRACKET attribute_list RBRACKET"""
    p[0] = p[2]

  def p_attribute_list_1(self, p):
    """attribute_list : """
    p[0] = ast.AttributeList()

  def p_attribute_list_2(self, p):
    """attribute_list : nonempty_attribute_list"""
    p[0] = p[1]

  def p_nonempty_attribute_list_1(self, p):
    """nonempty_attribute_list : attribute"""
    p[0] = ast.AttributeList(p[1])

  def p_nonempty_attribute_list_2(self, p):
    """nonempty_attribute_list : nonempty_attribute_list COMMA attribute"""
    p[0] = p[1]
    p[0].Append(p[3])

  def p_attribute(self, p):
    """attribute : NAME EQUALS evaled_literal
                 | NAME EQUALS NAME"""
    p[0] = ast.Attribute(p[1], p[3], filename=self.filename,
                         lineno=p.lineno(1))

  def p_evaled_literal(self, p):
    """evaled_literal : literal"""
    # 'eval' the literal to strip the quotes.
    p[0] = eval(p[1])

  def p_struct(self, p):
    """struct : attribute_section STRUCT NAME LBRACE struct_body RBRACE SEMI"""
    p[0] = ast.Struct(p[3], p[1], p[5])

  def p_struct_body_1(self, p):
    """struct_body : """
    p[0] = ast.StructBody()

  def p_struct_body_2(self, p):
    """struct_body : struct_body const
                   | struct_body enum
                   | struct_body struct_field"""
    p[0] = p[1]
    p[0].Append(p[2])

  def p_struct_field(self, p):
    """struct_field : typename NAME ordinal default SEMI"""
    p[0] = ast.StructField(p[2], p[3], p[1], p[4])

  def p_union(self, p):
    """union : UNION NAME LBRACE union_body RBRACE SEMI"""
    p[0] = ast.Union(p[2], p[4])

  def p_union_body_1(self, p):
    """union_body : """
    p[0] = ast.UnionBody()

  def p_union_body_2(self, p):
    """union_body : union_body union_field"""
    p[0] = p[1]
    p[1].Append(p[2])

  def p_union_field(self, p):
    """union_field : typename NAME ordinal SEMI"""
    p[0] = ast.UnionField(p[2], p[3], p[1])

  def p_default_1(self, p):
    """default : """
    p[0] = None

  def p_default_2(self, p):
    """default : EQUALS constant"""
    p[0] = p[2]

  def p_interface(self, p):
    """interface : attribute_section INTERFACE NAME LBRACE interface_body \
RBRACE SEMI"""
    p[0] = ast.Interface(p[3], p[1], p[5])

  def p_interface_body_1(self, p):
    """interface_body : """
    p[0] = ast.InterfaceBody()

  def p_interface_body_2(self, p):
    """interface_body : interface_body const
                      | interface_body enum
                      | interface_body method"""
    p[0] = p[1]
    p[0].Append(p[2])

  def p_response_1(self, p):
    """response : """
    p[0] = None

  def p_response_2(self, p):
    """response : RESPONSE LPAREN parameter_list RPAREN"""
    p[0] = p[3]

  def p_method(self, p):
    """method : NAME ordinal LPAREN parameter_list RPAREN response SEMI"""
    p[0] = ast.Method(p[1], p[2], p[4], p[6])

  def p_parameter_list_1(self, p):
    """parameter_list : """
    p[0] = ast.ParameterList()

  def p_parameter_list_2(self, p):
    """parameter_list : nonempty_parameter_list"""
    p[0] = p[1]

  def p_nonempty_parameter_list_1(self, p):
    """nonempty_parameter_list : parameter"""
    p[0] = ast.ParameterList(p[1])

  def p_nonempty_parameter_list_2(self, p):
    """nonempty_parameter_list : nonempty_parameter_list COMMA parameter"""
    p[0] = p[1]
    p[0].Append(p[3])

  def p_parameter(self, p):
    """parameter : typename NAME ordinal"""
    p[0] = ast.Parameter(p[2], p[3], p[1],
                         filename=self.filename, lineno=p.lineno(2))

  def p_typename(self, p):
    """typename : nonnullable_typename QSTN
                | nonnullable_typename"""
    # A trailing QSTN marks the type as nullable ("?" suffix in the result).
    if len(p) == 2:
      p[0] = p[1]
    else:
      p[0] = p[1] + "?"

  def p_nonnullable_typename(self, p):
    """nonnullable_typename : basictypename
                            | array
                            | fixed_array
                            | associative_array
                            | interfacerequest"""
    p[0] = p[1]

  def p_basictypename(self, p):
    """basictypename : identifier
                     | handletype"""
    p[0] = p[1]

  def p_handletype(self, p):
    """handletype : HANDLE
                  | HANDLE LANGLE NAME RANGLE"""
    if len(p) == 2:
      p[0] = p[1]
    else:
      if p[3] not in ('data_pipe_consumer',
                      'data_pipe_producer',
                      'message_pipe',
                      'shared_buffer'):
        # Note: We don't enable tracking of line numbers for everything, so we
        # can't use |p.lineno(3)|.
        raise ParseError(self.filename, "Invalid handle type %r:" % p[3],
                         lineno=p.lineno(1),
                         snippet=self._GetSnippet(p.lineno(1)))
      p[0] = "handle<" + p[3] + ">"

  def p_array(self, p):
    """array : ARRAY LANGLE typename RANGLE"""
    p[0] = p[3] + "[]"

  def p_fixed_array(self, p):
    """fixed_array : ARRAY LANGLE typename COMMA INT_CONST_DEC RANGLE"""
    # Fixed array sizes must be in [1, _MAX_ARRAY_SIZE].
    value = int(p[5])
    if value == 0 or value > _MAX_ARRAY_SIZE:
      raise ParseError(self.filename, "Fixed array size %d invalid:" % value,
                       lineno=p.lineno(5),
                       snippet=self._GetSnippet(p.lineno(5)))
    p[0] = p[3] + "[" + p[5] + "]"

  def p_associative_array(self, p):
    """associative_array : MAP LANGLE identifier COMMA typename RANGLE"""
    p[0] = p[5] + "{" + p[3] + "}"

  def p_interfacerequest(self, p):
    """interfacerequest : identifier AMP"""
    p[0] = p[1] + "&"

  def p_ordinal_1(self, p):
    """ordinal : """
    p[0] = None

  def p_ordinal_2(self, p):
    """ordinal : ORDINAL"""
    # Strip the leading "@" and range-check the numeric value.
    value = int(p[1][1:])
    if value > _MAX_ORDINAL_VALUE:
      raise ParseError(self.filename, "Ordinal value %d too large:" % value,
                       lineno=p.lineno(1),
                       snippet=self._GetSnippet(p.lineno(1)))
    p[0] = ast.Ordinal(value, filename=self.filename, lineno=p.lineno(1))

  def p_enum(self, p):
    """enum : ENUM NAME LBRACE nonempty_enum_value_list RBRACE SEMI
            | ENUM NAME LBRACE nonempty_enum_value_list COMMA RBRACE SEMI"""
    p[0] = ast.Enum(p[2], p[4], filename=self.filename, lineno=p.lineno(1))

  def p_nonempty_enum_value_list_1(self, p):
    """nonempty_enum_value_list : enum_value"""
    p[0] = ast.EnumValueList(p[1])

  def p_nonempty_enum_value_list_2(self, p):
    """nonempty_enum_value_list : nonempty_enum_value_list COMMA enum_value"""
    p[0] = p[1]
    p[0].Append(p[3])

  def p_enum_value(self, p):
    """enum_value : NAME
                  | NAME EQUALS int
                  | NAME EQUALS identifier_wrapped"""
    p[0] = ast.EnumValue(p[1], p[3] if len(p) == 4 else None,
                         filename=self.filename, lineno=p.lineno(1))

  def p_const(self, p):
    """const : CONST typename NAME EQUALS constant SEMI"""
    p[0] = ast.Const(p[3], p[2], p[5])

  def p_constant(self, p):
    """constant : literal
                | identifier_wrapped"""
    p[0] = p[1]

  def p_identifier_wrapped(self, p):
    """identifier_wrapped : identifier"""
    p[0] = ('IDENTIFIER', p[1])

  # TODO(vtl): Make this produce a "wrapped" identifier (probably as an
  # |ast.Identifier|, to be added) and get rid of identifier_wrapped.
  def p_identifier(self, p):
    """identifier : NAME
                  | NAME DOT identifier"""
    p[0] = ''.join(p[1:])

  def p_literal(self, p):
    """literal : int
               | float
               | TRUE
               | FALSE
               | DEFAULT
               | STRING_LITERAL"""
    p[0] = p[1]

  def p_int(self, p):
    """int : int_const
           | PLUS int_const
           | MINUS int_const"""
    p[0] = ''.join(p[1:])

  def p_int_const(self, p):
    """int_const : INT_CONST_DEC
                 | INT_CONST_HEX"""
    p[0] = p[1]

  def p_float(self, p):
    """float : FLOAT_CONST
             | PLUS FLOAT_CONST
             | MINUS FLOAT_CONST"""
    p[0] = ''.join(p[1:])

  def p_error(self, e):
    # Called by ply on a token it cannot shift/reduce; |e| is None at EOF.
    if e is None:
      # Unexpected EOF.
      # TODO(vtl): Can we figure out what's missing?
      raise ParseError(self.filename, "Unexpected end of file")

    raise ParseError(self.filename, "Unexpected %r:" % e.value,
                     lineno=e.lineno, snippet=self._GetSnippet(e.lineno))

  def _GetSnippet(self, lineno):
    # Returns the 1-indexed source line used in error messages.
    return self.source.split('\n')[lineno - 1]


def Parse(source, filename):
  # Builds the lexer/parser tables in memory (no table files written) and
  # returns the AST for |source|; raises ParseError on malformed input.
  lexer = Lexer(filename)
  parser = Parser(lexer, source, filename)

  lex.lex(object=lexer)
  yacc.yacc(module=parser, debug=0, write_tables=0)

  tree = yacc.parse(source)
  return tree
# Copyright (C) 2011, Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy import logging from webkitpy.common.memoized import memoized _log = logging.getLogger(__name__) # FIXME: Should this function be somewhere more general? 
def _invert_dictionary(dictionary):
    # Returns {value: [keys that mapped to it]} for the given dict.
    inverted_dictionary = {}
    for key, value in dictionary.items():
        if inverted_dictionary.get(value):
            inverted_dictionary[value].append(key)
        else:
            inverted_dictionary[value] = [key]
    return inverted_dictionary


class BaselineOptimizer(object):
    # Rearranges per-platform baseline files so that each expected result is
    # stored in as few directories as possible, exploiting the ports'
    # baseline-search fallback order. Results are compared by SHA1, so two
    # directories are "redundant" when they hold byte-identical baselines.

    ROOT_LAYOUT_TESTS_DIRECTORY = 'tests'

    def __init__(self, host, port_names, skip_scm_commands):
        # When skip_scm_commands is true, SCM adds/deletes are recorded in
        # _files_to_add/_files_to_delete instead of being executed.
        self._filesystem = host.filesystem
        self._port_factory = host.port_factory
        self._skip_scm_commands = skip_scm_commands
        self._files_to_delete = []
        self._files_to_add = []
        self._scm = host.scm()
        self._port_names = port_names

        # Only used by unittests.
        self.new_results_by_directory = []

    def _baseline_root(self, port, baseline_name):
        # Root directory for this baseline: the virtual-suite subtree for
        # virtual tests, otherwise the layout-tests root.
        virtual_suite = port.lookup_virtual_suite(baseline_name)
        if virtual_suite:
            return self._filesystem.join(self.ROOT_LAYOUT_TESTS_DIRECTORY, virtual_suite.name)
        return self.ROOT_LAYOUT_TESTS_DIRECTORY

    def _baseline_search_path(self, port, baseline_name):
        virtual_suite = port.lookup_virtual_suite(baseline_name)
        if virtual_suite:
            return port.virtual_baseline_search_path(baseline_name)
        return port.baseline_search_path()

    @memoized
    def _relative_baseline_search_paths(self, port_name, baseline_name):
        # Search path as checkout-relative directory names, ending with the root.
        port = self._port_factory.get(port_name)
        relative_paths = [self._filesystem.relpath(path, port.webkit_base()) for path in self._baseline_search_path(port, baseline_name)]
        return relative_paths + [self._baseline_root(port, baseline_name)]

    def _join_directory(self, directory, baseline_name):
        # This code is complicated because both the directory name and the baseline_name have the virtual
        # test suite in the name and the virtual baseline name is not a strict superset of the non-virtual name.
        # For example, virtual/gpu/fast/canvas/foo-expected.png corresponds to fast/canvas/foo-expected.png and
        # the baseline directories are like platform/mac/virtual/gpu/fast/canvas. So, to get the path
        # to the baseline in the platform directory, we need to append just foo-expected.png to the directory.
        virtual_suite = self._port_factory.get().lookup_virtual_suite(baseline_name)
        if virtual_suite:
            baseline_name_without_virtual = baseline_name[len(virtual_suite.name) + 1:]
        else:
            baseline_name_without_virtual = baseline_name
        return self._filesystem.join(self._scm.checkout_root, directory, baseline_name_without_virtual)

    def read_results_by_directory(self, baseline_name):
        # Returns {directory: sha1 of the baseline file} for every search-path
        # directory (across all ports) that contains this baseline.
        results_by_directory = {}
        # NOTE(review): bare |reduce| is the Python 2 builtin; this file
        # predates Python 3 (where it lives in functools).
        directories = reduce(set.union, map(set, [self._relative_baseline_search_paths(port_name, baseline_name) for port_name in self._port_names]))

        for directory in directories:
            path = self._join_directory(directory, baseline_name)
            if self._filesystem.exists(path):
                results_by_directory[directory] = self._filesystem.sha1(path)
        return results_by_directory

    def _results_by_port_name(self, results_by_directory, baseline_name):
        # For each port, the effective result: the first hit walking its
        # fallback order through results_by_directory.
        results_by_port_name = {}
        for port_name in self._port_names:
            for directory in self._relative_baseline_search_paths(port_name, baseline_name):
                if directory in results_by_directory:
                    results_by_port_name[port_name] = results_by_directory[directory]
                    break
        return results_by_port_name

    @memoized
    def _directories_immediately_preceding_root(self, baseline_name):
        # The last directory before the root in each port's fallback order.
        directories = set()
        for port_name in self._port_names:
            port = self._port_factory.get(port_name)
            directory = self._filesystem.relpath(self._baseline_search_path(port, baseline_name)[-1], port.webkit_base())
            directories.add(directory)
        return directories

    def _optimize_result_for_root(self, new_results_by_directory, baseline_name):
        # The root directory (i.e. tests) is the only one that doesn't correspond
        # to a specific platform. As such, it's the only one where the baseline in fallback directories
        # immediately before it can be promoted up, i.e. if win and mac
        # have the same baseline, then it can be promoted up to be the tests baseline.
        # All other baselines can only be removed if they're redundant with a baseline earlier
        # in the fallback order. They can never promoted up.
        directories_immediately_preceding_root = self._directories_immediately_preceding_root(baseline_name)

        shared_result = None
        root_baseline_unused = False
        for directory in directories_immediately_preceding_root:
            this_result = new_results_by_directory.get(directory)

            # If any of these directories don't have a baseline, there's no optimization we can do.
            if not this_result:
                return

            if not shared_result:
                shared_result = this_result
            elif shared_result != this_result:
                root_baseline_unused = True

        baseline_root = self._baseline_root(self._port_factory.get(), baseline_name)

        # The root baseline is unused if all the directories immediately preceding the root
        # have a baseline, but have different baselines, so the baselines can't be promoted up.
        if root_baseline_unused:
            if baseline_root in new_results_by_directory:
                del new_results_by_directory[baseline_root]
            return

        # All predecessors agree: promote the shared result up to the root.
        new_results_by_directory[baseline_root] = shared_result
        for directory in directories_immediately_preceding_root:
            del new_results_by_directory[directory]

    def _find_optimal_result_placement(self, baseline_name):
        # Returns (current placement, optimized placement) as
        # {directory: sha1} dicts.
        results_by_directory = self.read_results_by_directory(baseline_name)
        results_by_port_name = self._results_by_port_name(results_by_directory, baseline_name)
        port_names_by_result = _invert_dictionary(results_by_port_name)

        new_results_by_directory = self._remove_redundant_results(results_by_directory, results_by_port_name, port_names_by_result, baseline_name)
        self._optimize_result_for_root(new_results_by_directory, baseline_name)

        return results_by_directory, new_results_by_directory

    def _remove_redundant_results(self, results_by_directory, results_by_port_name, port_names_by_result, baseline_name):
        # Push each port's result as far down its fallback path as possible,
        # deleting copies that an earlier directory already provides.
        new_results_by_directory = copy.copy(results_by_directory)
        for port_name in self._port_names:
            current_result = results_by_port_name.get(port_name)

            # This happens if we're missing baselines for a port.
            if not current_result:
                continue;

            fallback_path = self._relative_baseline_search_paths(port_name, baseline_name)
            current_index, current_directory = self._find_in_fallbackpath(fallback_path, current_result, new_results_by_directory)
            for index in range(current_index + 1, len(fallback_path)):
                new_directory = fallback_path[index]
                if not new_directory in new_results_by_directory:
                    # No result for this baseline in this directory.
                    continue
                elif new_results_by_directory[new_directory] == current_result:
                    # Result for new_directory are redundant with the result earlier in the fallback order.
                    if current_directory in new_results_by_directory:
                        del new_results_by_directory[current_directory]
                else:
                    # The new_directory contains a different result, so stop trying to push results up.
                    break

        return new_results_by_directory

    def _find_in_fallbackpath(self, fallback_path, current_result, results_by_directory):
        # Returns (index, directory) of the first fallback directory holding
        # current_result; asserts if it is not found.
        for index, directory in enumerate(fallback_path):
            if directory in results_by_directory and (results_by_directory[directory] == current_result):
                return index, directory
        assert False, "result %s not found in fallback_path %s, %s" % (current_result, fallback_path, results_by_directory)

    def _platform(self, filename):
        # Human-readable platform name for a baseline path ('(generic)' when
        # the file is not under a platform/ directory).
        platform_dir = self.ROOT_LAYOUT_TESTS_DIRECTORY + self._filesystem.sep + 'platform' + self._filesystem.sep
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
        platform_dir = self._filesystem.join(self._scm.checkout_root, platform_dir)
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
        return '(generic)'

    def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
        # Applies the optimized placement: deletes files (via SCM or the file
        # system) whose directory no longer holds their result, then writes
        # the result bytes into each directory that newly needs them.
        data_for_result = {}
        for directory, result in results_by_directory.items():
            if not result in data_for_result:
                source = self._join_directory(directory, baseline_name)
                data_for_result[result] = self._filesystem.read_binary_file(source)

        scm_files = []
        fs_files = []
        for directory, result in results_by_directory.items():
            if new_results_by_directory.get(directory) != result:
                file_name = self._join_directory(directory, baseline_name)
                if self._scm.exists(file_name):
                    scm_files.append(file_name)
                else:
                    fs_files.append(file_name)

        if scm_files or fs_files:
            if scm_files:
                _log.debug("    Deleting (SCM):")
                for platform_dir in sorted(self._platform(filename) for filename in scm_files):
                    _log.debug("      " + platform_dir)
                if self._skip_scm_commands:
                    self._files_to_delete.extend(scm_files)
                else:
                    self._scm.delete_list(scm_files)
            if fs_files:
                _log.debug("    Deleting (file system):")
                for platform_dir in sorted(self._platform(filename) for filename in fs_files):
                    _log.debug("      " + platform_dir)
                for filename in fs_files:
                    self._filesystem.remove(filename)
        else:
            _log.debug("    (Nothing to delete)")

        file_names = []
        for directory, result in new_results_by_directory.items():
            if results_by_directory.get(directory) != result:
                destination = self._join_directory(directory, baseline_name)
                self._filesystem.maybe_make_directory(self._filesystem.split(destination)[0])
                self._filesystem.write_binary_file(destination, data_for_result[result])
                file_names.append(destination)

        if file_names:
            _log.debug("    Adding:")
            for platform_dir in sorted(self._platform(filename) for filename in file_names):
                _log.debug("      " + platform_dir)
            if self._skip_scm_commands:
                # Have adds win over deletes.
                self._files_to_delete = list(set(self._files_to_delete) - set(file_names))
                self._files_to_add.extend(file_names)
            else:
                self._scm.add_list(file_names)
        else:
            _log.debug("    (Nothing to add)")

    def write_by_directory(self, results_by_directory, writer, indent):
        # Logs each directory's platform name with a short sha1 prefix.
        for path in sorted(results_by_directory):
            writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))

    def _optimize_subtree(self, baseline_name):
        # Optimizes one baseline; returns False only if the optimization would
        # have changed some port's effective result (a sanity-check failure).
        basename = self._filesystem.basename(baseline_name)
        results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)

        if new_results_by_directory == results_by_directory:
            if new_results_by_directory:
                _log.debug("  %s: (already optimal)" % basename)
                self.write_by_directory(results_by_directory, _log.debug, "    ")
            else:
                _log.debug("  %s: (no baselines found)" % basename)
            # This is just used for unittests. Intentionally set it to the old data if we don't modify anything.
            self.new_results_by_directory.append(results_by_directory)
            return True

        if self._results_by_port_name(results_by_directory, baseline_name) != self._results_by_port_name(new_results_by_directory, baseline_name):
            # This really should never happen. Just a sanity check to make sure the script fails in the case of bugs
            # instead of committing incorrect baselines.
            _log.error("  %s: optimization failed" % basename)
            self.write_by_directory(results_by_directory, _log.warning, "      ")
            return False

        _log.debug("  %s:" % basename)
        _log.debug("    Before: ")
        self.write_by_directory(results_by_directory, _log.debug, "      ")
        _log.debug("    After: ")
        self.write_by_directory(new_results_by_directory, _log.debug, "      ")

        self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
        return True

    def _optimize_virtual_root(self, baseline_name, non_virtual_baseline_name):
        # Deletes the virtual-root baseline when every port already falls back
        # to an identical non-virtual result, making it redundant.
        default_port = self._port_factory.get()
        virtual_root_expected_baseline_path = self._filesystem.join(default_port.layout_tests_dir(), baseline_name)
        if not self._filesystem.exists(virtual_root_expected_baseline_path):
            return
        root_sha1 = self._filesystem.sha1(virtual_root_expected_baseline_path)

        results_by_directory = self.read_results_by_directory(non_virtual_baseline_name)
        # See if all the immediate predecessors of the virtual root have the same expected result.
        for port_name in self._port_names:
            directories = self._relative_baseline_search_paths(port_name, non_virtual_baseline_name)
            for directory in directories:
                if directory not in results_by_directory:
                    continue
                if results_by_directory[directory] != root_sha1:
                    return
                break

        _log.debug("Deleting redundant virtual root expected result.")
        if self._skip_scm_commands and virtual_root_expected_baseline_path in self._files_to_add:
            self._files_to_add.remove(virtual_root_expected_baseline_path)
        if self._scm.exists(virtual_root_expected_baseline_path):
            _log.debug("    Deleting (SCM): " + virtual_root_expected_baseline_path)
            if self._skip_scm_commands:
                self._files_to_delete.append(virtual_root_expected_baseline_path)
            else:
                self._scm.delete(virtual_root_expected_baseline_path)
        else:
            _log.debug("    Deleting (file system): " + virtual_root_expected_baseline_path)
            self._filesystem.remove(virtual_root_expected_baseline_path)

    def optimize(self, baseline_name):
        # The virtual fallback path is the same as the non-virtual one tacked on to the bottom of the non-virtual path.
        # See https://docs.google.com/a/chromium.org/drawings/d/1eGdsIKzJ2dxDDBbUaIABrN4aMLD1bqJTfyxNGZsTdmg/edit for
        # a visual representation of this.
        #
        # So, we can optimize the virtual path, then the virtual root and then the regular path.
        # Returns (success, files_to_delete, files_to_add); the file lists are
        # only populated when skip_scm_commands is set.
        self._files_to_delete = []
        self._files_to_add = []
        _log.debug("Optimizing regular fallback path.")
        result = self._optimize_subtree(baseline_name)
        non_virtual_baseline_name = self._port_factory.get().lookup_virtual_test_base(baseline_name)
        if not non_virtual_baseline_name:
            return result, self._files_to_delete, self._files_to_add

        self._optimize_virtual_root(baseline_name, non_virtual_baseline_name)

        _log.debug("Optimizing non-virtual fallback path.")
        result |= self._optimize_subtree(non_virtual_baseline_name)
        return result, self._files_to_delete, self._files_to_add
"""
Forward Simulation of Gravity Anomaly Data on a Tensor Mesh
===========================================================

Here we use the module *SimPEG.potential_fields.gravity* to predict gravity
anomaly data for a synthetic density contrast model. The simulation is
carried out on a tensor mesh. For this tutorial, we focus on the following:

    - How to create gravity surveys
    - How to predict gravity anomaly data for a density contrast model
    - How to include surface topography
    - The units of the density contrast model and resulting data

"""

#########################################################################
# Import Modules
# --------------
#

import numpy as np
from scipy.interpolate import LinearNDInterpolator
import matplotlib as mpl
import matplotlib.pyplot as plt
import os

from discretize import TensorMesh
from discretize.utils import mkvc

from SimPEG.utils import plot2Ddata, model_builder, surface2ind_topo
from SimPEG import maps
from SimPEG.potential_fields import gravity

# Toggle writing of the topography/data files at the end of the script.
save_output = False

# sphinx_gallery_thumbnail_number = 2


#############################################
# Defining Topography
# -------------------
#
# Surface topography is defined as an (N, 3) numpy array. We create it here but
# the topography could also be loaded from a file.
#

# Gaussian depression centered on the origin; grids are 41 x 41 over +/-200 m.
[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41))
z_topo = -15 * np.exp(-(x_topo ** 2 + y_topo ** 2) / 80 ** 2)
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
topo_xyz = np.c_[x_topo, y_topo, z_topo]


#############################################
# Defining the Survey
# -------------------
#
# Here, we define survey that will be used for the forward simulation. Gravity
# surveys are simple to create. The user only needs an (N, 3) array to define
# the xyz locations of the observation locations, and a list of field components
# which are to be measured.
#

# Define the observation locations as an (N, 3) numpy array or load them.
x = np.linspace(-80.0, 80.0, 17)
y = np.linspace(-80.0, 80.0, 17)
x, y = np.meshgrid(x, y)
x, y = mkvc(x.T), mkvc(y.T)
# Drape the receivers 5 m above the (interpolated) topographic surface.
fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo)
z = fun_interp(np.c_[x, y]) + 5.0
receiver_locations = np.c_[x, y, z]

# Define the component(s) of the field we want to simulate as strings within
# a list. Here we simulate only the vertical component of gravity anomaly.
components = ["gz"]

# Use the observation locations and components to define the receivers. To
# simulate data, the receivers must be defined as a list.
receiver_list = gravity.receivers.Point(receiver_locations, components=components)

receiver_list = [receiver_list]

# Defining the source field.
source_field = gravity.sources.SourceField(receiver_list=receiver_list)

# Defining the survey
survey = gravity.survey.Survey(source_field)


#############################################
# Defining a Tensor Mesh
# ----------------------
#
# Here, we create the tensor mesh that will be used to predict gravity anomaly
# data.
#

# 5 m core cells with 5 padding cells expanding by a factor of 1.3 on each side.
dh = 5.0
hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
hz = [(dh, 5, -1.3), (dh, 15)]
# "CCN": centered in x and y, top of the mesh at z = 0.
mesh = TensorMesh([hx, hy, hz], "CCN")


########################################################
# Density Contrast Model and Mapping on Tensor Mesh
# -------------------------------------------------
#
# Here, we create the density contrast model that will be used to predict
# gravity anomaly data and the mapping from the model to the mesh. The model
# consists of a less dense block and a more dense sphere.
#

# Define density contrast values for each unit in g/cc
background_density = 0.0
block_density = -0.2
sphere_density = 0.2

# Find the indices for the active mesh cells (e.g. cells below surface)
ind_active = surface2ind_topo(mesh, topo_xyz)

# Define mapping from model to active cells. The model consists of a value for
# each cell below the Earth's surface.
nC = int(ind_active.sum())
model_map = maps.IdentityMap(nP=nC)

# Define model. Models in SimPEG are vector arrays.
model = background_density * np.ones(nC)

# You could find the indicies of specific cells within the model and change their
# value to add structures.
ind_block = (
    (mesh.gridCC[ind_active, 0] > -50.0)
    & (mesh.gridCC[ind_active, 0] < -20.0)
    & (mesh.gridCC[ind_active, 1] > -15.0)
    & (mesh.gridCC[ind_active, 1] < 15.0)
    & (mesh.gridCC[ind_active, 2] > -50.0)
    & (mesh.gridCC[ind_active, 2] < -30.0)
)
model[ind_block] = block_density

# You can also use SimPEG utilities to add structures to the model more concisely
ind_sphere = model_builder.getIndicesSphere(np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC)
# Restrict the sphere indices to the active (below-topography) cells.
ind_sphere = ind_sphere[ind_active]
model[ind_sphere] = sphere_density

# Plot Density Contrast Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)

ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78])
mesh.plotSlice(
    plotting_map * model,
    normal="Y",
    ax=ax1,
    ind=int(mesh.nCy / 2),
    grid=True,
    clim=(np.min(model), np.max(model)),
    pcolorOpts={"cmap": "viridis"},
)
ax1.set_title("Model slice at y = 0 m")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")

ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78])
norm = mpl.colors.Normalize(vmin=np.min(model), vmax=np.max(model))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
)
cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)

plt.show()


#######################################################
# Simulation: Gravity Anomaly Data on Tensor Mesh
# -----------------------------------------------
#
# Here we demonstrate how to predict gravity anomaly data using the integral
# formulation.
#

# Define the forward simulation. By setting the 'store_sensitivities' keyword
# argument to "forward_only", we simulate the data without storing the sensitivities
simulation = gravity.simulation.Simulation3DIntegral(
    survey=survey,
    mesh=mesh,
    rhoMap=model_map,
    actInd=ind_active,
    store_sensitivities="forward_only",
)

# Compute predicted data for some model
# SimPEG uses right handed coordinate where Z is positive upward.
# This causes gravity signals look "inconsistent" with density values in visualization.
dpred = simulation.dpred(model)

# Plot
fig = plt.figure(figsize=(7, 5))

ax1 = fig.add_axes([0.1, 0.1, 0.75, 0.85])
plot2Ddata(receiver_list[0].locations, dpred, ax=ax1, contourOpts={"cmap": "bwr"})
ax1.set_title("Gravity Anomaly (Z-component)")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")

ax2 = fig.add_axes([0.82, 0.1, 0.03, 0.85])
# Symmetric color limits so the diverging colormap is centered on zero.
norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dpred)), vmax=np.max(np.abs(dpred)))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
)
cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12)

plt.show()


#######################################################
# Optional: Exporting Results
# ---------------------------
#
# Write the data, topography and true model
#

if save_output:

    dir_path = os.path.dirname(__file__).split(os.path.sep)
    dir_path.extend(["outputs"])
    dir_path = os.path.sep.join(dir_path) + os.path.sep

    if not os.path.exists(dir_path):
        os.mkdir(dir_path)

    fname = dir_path + "gravity_topo.txt"
    np.savetxt(fname, np.c_[topo_xyz], fmt="%.4e")

    # Fixed seed so the exported noisy data set is reproducible; noise level
    # is 1% of the peak anomaly amplitude.
    np.random.seed(737)
    maximum_anomaly = np.max(np.abs(dpred))
    noise = 0.01 * maximum_anomaly * np.random.rand(len(dpred))
    fname = dir_path + "gravity_data.obs"
    np.savetxt(fname, np.c_[receiver_locations, dpred + noise], fmt="%.4e")
# $Id: ip.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Internet Protocol."""
# NOTE(review): this module is Python 2 code (print statement, old-style
# raise, dict.iteritems); it must not be run under Python 3 unmodified.

import dpkt
from decorators import deprecated


class IP(dpkt.Packet):
    """IPv4 packet header plus payload.

    Header fields are declared via dpkt's ``__hdr__`` table; bit-packed
    fields (version/header-length, fragmentation flags/offset) are exposed
    through properties below.
    """

    __hdr__ = (
        # _v_hl packs version (high nibble) and header length in 32-bit
        # words (low nibble); default is version 4, 20-byte header.
        ('_v_hl', 'B', (4 << 4) | (20 >> 2)),
        ('tos', 'B', 0),
        ('len', 'H', 20),
        ('id', 'H', 0),
        ('off', 'H', 0),
        ('ttl', 'B', 64),
        ('p', 'B', 0),
        ('sum', 'H', 0),
        ('src', '4s', '\x00' * 4),
        ('dst', '4s', '\x00' * 4)
    )
    # Class-level dispatch table: protocol number -> payload Packet class.
    _protosw = {}
    # Raw IP options bytes (empty for the common 20-byte header).
    opts = ''

    def __init__(self, *args, **kwargs):
        super(IP, self).__init__(*args, **kwargs)
        # If IP packet is not initialized by string and the len field has
        # been rewritten.
        if not args and 'len' not in kwargs:
            self.len = self.__len__()

    @property
    def v(self):
        # IP version: high nibble of _v_hl.
        return self._v_hl >> 4

    @v.setter
    def v(self, v):
        self._v_hl = (v << 4) | (self._v_hl & 0xf)

    @property
    def hl(self):
        # Header length in 32-bit words: low nibble of _v_hl.
        return self._v_hl & 0xf

    @hl.setter
    def hl(self, hl):
        self._v_hl = (self._v_hl & 0xf0) | hl

    @property
    def rf(self):
        # Reserved fragmentation flag (bit 15 of ip_off).
        return (self.off >> 15) & 0x1

    @rf.setter
    def rf(self, rf):
        self.off = (self.off & ~IP_RF) | (rf << 15)

    @property
    def df(self):
        # "Don't fragment" flag (bit 14 of ip_off).
        return (self.off >> 14) & 0x1

    @df.setter
    def df(self, df):
        self.off = (self.off & ~IP_DF) | (df << 14)

    @property
    def mf(self):
        # "More fragments" flag (bit 13 of ip_off).
        return (self.off >> 13) & 0x1

    @mf.setter
    def mf(self, mf):
        self.off = (self.off & ~IP_MF) | (mf << 13)

    @property
    def offset(self):
        # Fragment offset in bytes (stored in 8-byte units, hence << 3).
        return (self.off & IP_OFFMASK) << 3

    @offset.setter
    def offset(self, offset):
        self.off = (self.off & ~IP_OFFMASK) | (offset >> 3)

    # Deprecated methods, will be removed in the future
    # =================================================
    @deprecated('v')
    def _get_v(self): return self.v

    @deprecated('v')
    def _set_v(self, v): self.v = v

    @deprecated('hl')
    def _get_hl(self): return self.hl

    @deprecated('hl')
    def _set_hl(self, hl): self.hl = hl
    # =================================================

    def __len__(self):
        # Total datagram length: fixed header + options + payload.
        return self.__hdr_len__ + len(self.opts) + len(self.data)

    def __str__(self):
        """Serialize the packet, filling in len and zeroed checksums."""
        self.len = self.__len__()
        if self.sum == 0:
            self.sum = dpkt.in_cksum(self.pack_hdr() + str(self.opts))
            if (self.p == 6 or
                self.p == 17) and (self.off & (IP_MF | IP_OFFMASK)) == 0 and \
                    isinstance(self.data, dpkt.Packet) and self.data.sum == 0:
                # Set zeroed TCP and UDP checksums for non-fragments.
                p = str(self.data)
                s = dpkt.struct.pack('>4s4sxBH',
                                     self.src, self.dst, self.p, len(p))
                s = dpkt.in_cksum_add(0, s)
                s = dpkt.in_cksum_add(s, p)
                self.data.sum = dpkt.in_cksum_done(s)
                if self.p == 17 and self.data.sum == 0:
                    self.data.sum = 0xffff  # RFC 768
                # XXX - skip transports which don't need the pseudoheader
        return self.pack_hdr() + str(self.opts) + str(self.data)

    def unpack(self, buf):
        """Parse header, options and payload from ``buf``.

        The payload is dispatched to the registered protocol class when one
        exists; otherwise (or on a nested parse error) it is kept as raw
        bytes.
        """
        dpkt.Packet.unpack(self, buf)
        # Options length: header length in bytes minus the fixed 20 bytes.
        ol = ((self._v_hl & 0xf) << 2) - self.__hdr_len__
        if ol < 0:
            raise dpkt.UnpackError, 'invalid header length'
        self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
        if self.len:
            buf = buf[self.__hdr_len__ + ol:self.len]
        else:
            # very likely due to TCP segmentation offload
            buf = buf[self.__hdr_len__ + ol:]
        try:
            self.data = self._protosw[self.p](buf)
            # Also expose the payload by protocol name, e.g. ip.tcp, ip.udp.
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            self.data = buf

    @classmethod
    def set_proto(cls, p, pktclass):
        # Register a payload class for IP protocol number ``p``.
        cls._protosw[p] = pktclass

    @classmethod
    def get_proto(cls, p):
        # Look up the payload class for IP protocol number ``p``.
        return cls._protosw[p]


# IP Headers
IP_ADDR_LEN = 0x04
IP_ADDR_BITS = 0x20

IP_HDR_LEN = 0x14
IP_OPT_LEN = 0x02
IP_OPT_LEN_MAX = 0x28
IP_HDR_LEN_MAX = IP_HDR_LEN + IP_OPT_LEN_MAX

IP_LEN_MAX = 0xffff
IP_LEN_MIN = IP_HDR_LEN

# Reserved Addresses
IP_ADDR_ANY = "\x00\x00\x00\x00"  # 0.0.0.0
IP_ADDR_BROADCAST = "\xff\xff\xff\xff"  # 255.255.255.255
IP_ADDR_LOOPBACK = "\x7f\x00\x00\x01"  # 127.0.0.1
IP_ADDR_MCAST_ALL = "\xe0\x00\x00\x01"  # 224.0.0.1
IP_ADDR_MCAST_LOCAL = "\xe0\x00\x00\xff"  # 224.0.0.255

# Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
IP_TOS_DEFAULT = 0x00  # default
IP_TOS_LOWDELAY = 0x10  # low delay
IP_TOS_THROUGHPUT = 0x08  # high throughput
IP_TOS_RELIABILITY = 0x04  # high reliability
IP_TOS_LOWCOST = 0x02  # low monetary cost - XXX
IP_TOS_ECT = 0x02  # ECN-capable transport
IP_TOS_CE = 0x01  # congestion experienced

# IP precedence (high 3 bits of ip_tos), hopefully unused
IP_TOS_PREC_ROUTINE = 0x00
IP_TOS_PREC_PRIORITY = 0x20
IP_TOS_PREC_IMMEDIATE = 0x40
IP_TOS_PREC_FLASH = 0x60
IP_TOS_PREC_FLASHOVERRIDE = 0x80
IP_TOS_PREC_CRITIC_ECP = 0xa0
IP_TOS_PREC_INTERNETCONTROL = 0xc0
IP_TOS_PREC_NETCONTROL = 0xe0

# Fragmentation flags (ip_off)
IP_RF = 0x8000  # reserved
IP_DF = 0x4000  # don't fragment
IP_MF = 0x2000  # more fragments (not last frag)
IP_OFFMASK = 0x1fff  # mask for fragment offset

# Time-to-live (ip_ttl), seconds
IP_TTL_DEFAULT = 64  # default ttl, RFC 1122, RFC 1340
IP_TTL_MAX = 255  # maximum ttl

# Protocol (ip_p) - http://www.iana.org/assignments/protocol-numbers
IP_PROTO_IP = 0  # dummy for IP
IP_PROTO_HOPOPTS = IP_PROTO_IP  # IPv6 hop-by-hop options
IP_PROTO_ICMP = 1  # ICMP
IP_PROTO_IGMP = 2  # IGMP
IP_PROTO_GGP = 3  # gateway-gateway protocol
IP_PROTO_IPIP = 4  # IP in IP
IP_PROTO_ST = 5  # ST datagram mode
IP_PROTO_TCP = 6  # TCP
IP_PROTO_CBT = 7  # CBT
IP_PROTO_EGP = 8  # exterior gateway protocol
IP_PROTO_IGP = 9  # interior gateway protocol
IP_PROTO_BBNRCC = 10  # BBN RCC monitoring
IP_PROTO_NVP = 11  # Network Voice Protocol
IP_PROTO_PUP = 12  # PARC universal packet
IP_PROTO_ARGUS = 13  # ARGUS
IP_PROTO_EMCON = 14  # EMCON
IP_PROTO_XNET = 15  # Cross Net Debugger
IP_PROTO_CHAOS = 16  # Chaos
IP_PROTO_UDP = 17  # UDP
IP_PROTO_MUX = 18  # multiplexing
IP_PROTO_DCNMEAS = 19  # DCN measurement
IP_PROTO_HMP = 20  # Host Monitoring Protocol
IP_PROTO_PRM = 21  # Packet Radio Measurement
IP_PROTO_IDP = 22  # Xerox NS IDP
IP_PROTO_TRUNK1 = 23  # Trunk-1
IP_PROTO_TRUNK2 = 24  # Trunk-2
IP_PROTO_LEAF1 = 25  # Leaf-1
IP_PROTO_LEAF2 = 26  # Leaf-2
IP_PROTO_RDP = 27  # "Reliable Datagram" proto
IP_PROTO_IRTP = 28  # Inet Reliable Transaction
IP_PROTO_TP = 29  # ISO TP class 4
IP_PROTO_NETBLT = 30  # Bulk Data Transfer
IP_PROTO_MFPNSP = 31  # MFE Network Services
IP_PROTO_MERITINP = 32  # Merit Internodal Protocol
IP_PROTO_SEP = 33  # Sequential Exchange proto
IP_PROTO_3PC = 34  # Third Party Connect proto
IP_PROTO_IDPR = 35  # Interdomain Policy Route
IP_PROTO_XTP = 36  # Xpress Transfer Protocol
IP_PROTO_DDP = 37  # Datagram Delivery Proto
IP_PROTO_CMTP = 38  # IDPR Ctrl Message Trans
IP_PROTO_TPPP = 39  # TP++ Transport Protocol
IP_PROTO_IL = 40  # IL Transport Protocol
IP_PROTO_IP6 = 41  # IPv6
IP_PROTO_SDRP = 42  # Source Demand Routing
IP_PROTO_ROUTING = 43  # IPv6 routing header
IP_PROTO_FRAGMENT = 44  # IPv6 fragmentation header
IP_PROTO_RSVP = 46  # Reservation protocol
IP_PROTO_GRE = 47  # General Routing Encap
IP_PROTO_MHRP = 48  # Mobile Host Routing
IP_PROTO_ENA = 49  # ENA
IP_PROTO_ESP = 50  # Encap Security Payload
IP_PROTO_AH = 51  # Authentication Header
IP_PROTO_INLSP = 52  # Integated Net Layer Sec
IP_PROTO_SWIPE = 53  # SWIPE
IP_PROTO_NARP = 54  # NBMA Address Resolution
IP_PROTO_MOBILE = 55  # Mobile IP, RFC 2004
IP_PROTO_TLSP = 56  # Transport Layer Security
IP_PROTO_SKIP = 57  # SKIP
IP_PROTO_ICMP6 = 58  # ICMP for IPv6
IP_PROTO_NONE = 59  # IPv6 no next header
IP_PROTO_DSTOPTS = 60  # IPv6 destination options
IP_PROTO_ANYHOST = 61  # any host internal proto
IP_PROTO_CFTP = 62  # CFTP
IP_PROTO_ANYNET = 63  # any local network
IP_PROTO_EXPAK = 64  # SATNET and Backroom EXPAK
IP_PROTO_KRYPTOLAN = 65  # Kryptolan
IP_PROTO_RVD = 66  # MIT Remote Virtual Disk
IP_PROTO_IPPC = 67  # Inet Pluribus Packet Core
IP_PROTO_DISTFS = 68  # any distributed fs
IP_PROTO_SATMON = 69  # SATNET Monitoring
IP_PROTO_VISA = 70  # VISA Protocol
IP_PROTO_IPCV = 71  # Inet Packet Core Utility
IP_PROTO_CPNX = 72  # Comp Proto Net Executive
IP_PROTO_CPHB = 73  # Comp Protocol Heart Beat
IP_PROTO_WSN = 74  # Wang Span Network
IP_PROTO_PVP = 75  # Packet Video Protocol
IP_PROTO_BRSATMON = 76  # Backroom SATNET Monitor
IP_PROTO_SUNND = 77  # SUN ND Protocol
IP_PROTO_WBMON = 78  # WIDEBAND Monitoring
IP_PROTO_WBEXPAK = 79  # WIDEBAND EXPAK
IP_PROTO_EON = 80  # ISO CNLP
IP_PROTO_VMTP = 81  # Versatile Msg Transport
IP_PROTO_SVMTP = 82  # Secure VMTP
IP_PROTO_VINES = 83  # VINES
IP_PROTO_TTP = 84  # TTP
IP_PROTO_NSFIGP = 85  # NSFNET-IGP
IP_PROTO_DGP = 86  # Dissimilar Gateway Proto
IP_PROTO_TCF = 87  # TCF
IP_PROTO_EIGRP = 88  # EIGRP
IP_PROTO_OSPF = 89  # Open Shortest Path First
IP_PROTO_SPRITERPC = 90  # Sprite RPC Protocol
IP_PROTO_LARP = 91  # Locus Address Resolution
IP_PROTO_MTP = 92  # Multicast Transport Proto
IP_PROTO_AX25 = 93  # AX.25 Frames
IP_PROTO_IPIPENCAP = 94  # yet-another IP encap
IP_PROTO_MICP = 95  # Mobile Internet Ctrl
IP_PROTO_SCCSP = 96  # Semaphore Comm Sec Proto
IP_PROTO_ETHERIP = 97  # Ethernet in IPv4
IP_PROTO_ENCAP = 98  # encapsulation header
IP_PROTO_ANYENC = 99  # private encryption scheme
IP_PROTO_GMTP = 100  # GMTP
IP_PROTO_IFMP = 101  # Ipsilon Flow Mgmt Proto
IP_PROTO_PNNI = 102  # PNNI over IP
IP_PROTO_PIM = 103  # Protocol Indep Multicast
IP_PROTO_ARIS = 104  # ARIS
IP_PROTO_SCPS = 105  # SCPS
IP_PROTO_QNX = 106  # QNX
IP_PROTO_AN = 107  # Active Networks
IP_PROTO_IPCOMP = 108  # IP Payload Compression
IP_PROTO_SNP = 109  # Sitara Networks Protocol
IP_PROTO_COMPAQPEER = 110  # Compaq Peer Protocol
IP_PROTO_IPXIP = 111  # IPX in IP
IP_PROTO_VRRP = 112  # Virtual Router Redundancy
IP_PROTO_PGM = 113  # PGM Reliable Transport
IP_PROTO_ANY0HOP = 114  # 0-hop protocol
IP_PROTO_L2TP = 115  # Layer 2 Tunneling Proto
IP_PROTO_DDX = 116  # D-II Data Exchange (DDX)
IP_PROTO_IATP = 117  # Interactive Agent Xfer
IP_PROTO_STP = 118  # Schedule Transfer Proto
IP_PROTO_SRP = 119  # SpectraLink Radio Proto
IP_PROTO_UTI = 120  # UTI
IP_PROTO_SMP = 121  # Simple Message Protocol
IP_PROTO_SM = 122  # SM
IP_PROTO_PTP = 123  # Performance Transparency
IP_PROTO_ISIS = 124  # ISIS over IPv4
IP_PROTO_FIRE = 125  # FIRE
IP_PROTO_CRTP = 126  # Combat Radio Transport
IP_PROTO_CRUDP = 127  # Combat Radio UDP
IP_PROTO_SSCOPMCE = 128  # SSCOPMCE
IP_PROTO_IPLT = 129  # IPLT
IP_PROTO_SPS = 130  # Secure Packet Shield
IP_PROTO_PIPE = 131  # Private IP Encap in IP
IP_PROTO_SCTP = 132  # Stream Ctrl Transmission
IP_PROTO_FC = 133  # Fibre Channel
IP_PROTO_RSVPIGN = 134  # RSVP-E2E-IGNORE
IP_PROTO_RAW = 255  # Raw IP packets
IP_PROTO_RESERVED = IP_PROTO_RAW  # Reserved
IP_PROTO_MAX = 255


# XXX - auto-load IP dispatch table from IP_PROTO_* definitions
def __load_protos():
    # For each IP_PROTO_FOO constant, try a relative import of module `foo`
    # and register its FOO class; missing handlers are silently skipped.
    g = globals()
    for k, v in g.iteritems():
        if k.startswith('IP_PROTO_'):
            name = k[9:].lower()
            try:
                mod = __import__(name, g, level=1)
                IP.set_proto(v, getattr(mod, name.upper()))
            except (ImportError, AttributeError):
                continue


# Populate the dispatch table once, on first import of this module.
if not IP._protosw:
    __load_protos()


def test_ip():
    # Round-trip a hand-built IP/UDP packet and re-parse the serialized bytes.
    import udp
    s = 'E\x00\x00"\x00\x00\x00\x00@\x11r\xc0\x01\x02\x03\x04\x01\x02\x03\x04\x00o\x00\xde\x00\x0e\xbf5foobar'
    ip = IP(id=0, src='\x01\x02\x03\x04', dst='\x01\x02\x03\x04', p=17)
    u = udp.UDP(sport=111, dport=222)
    u.data = 'foobar'
    u.ulen += len(u.data)
    ip.data = u
    ip.len += len(u)
    assert (str(ip) == s)

    ip = IP(s)
    assert (str(ip) == s)
    assert (ip.udp.sport == 111)
    assert (ip.udp.data == 'foobar')


def test_hl():  # Todo chack this test method
    # A malformed buffer should raise UnpackError (invalid header length).
    s = 'BB\x03\x00\x00\x00\x00\x00\x00\x00\xd0\x00\xec\xbc\xa5\x00\x00\x00\x03\x80\x00\x00\xd0\x01\xf2\xac\xa5"0\x01\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    try:
        IP(s)
    except dpkt.UnpackError:
        pass


def test_opt():
    # Round-trip a packet carrying IP options (hl > 5).
    s = '\x4f\x00\x00\x3c\xae\x08\x00\x00\x40\x06\x18\x10\xc0\xa8\x0a\x26\xc0\xa8\x0a\x01\x07\x27\x08\x01\x02\x03\x04\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    ip = IP(s)
    ip.sum = 0
    assert (str(ip) == s)


def test_zerolen():
    # ip.len == 0 (TCP segmentation offload): payload extends to end of buffer.
    import tcp
    d = 'X' * 2048
    s = 'E\x00\x00\x004\xce@\x00\x80\x06\x00\x00\x7f\x00\x00\x01\x7f\x00\x00\x01\xccN\x0c8`\xff\xc6N_\x8a\x12\x98P\x18@):\xa3\x00\x00' + d
    ip = IP(s)
    assert (isinstance(ip.data, tcp.TCP))
    assert (ip.tcp.data == d)


def test_constuctor():
    # Keyword construction must auto-fill len unless it was passed explicitly.
    ip1 = IP(data = "Hello world!")
    ip2 = IP(data = "Hello world!", len = 0)
    ip3 = IP(str(ip1))
    ip4 = IP(str(ip2))
    assert (str(ip1) == str(ip3))
    assert (str(ip1) == 'E\x00\x00 \x00\x00\x00\x00@\x00z\xdf\x00\x00\x00\x00\x00\x00\x00\x00Hello world!')
    assert (str(ip2) == str(ip4))
    assert (str(ip2) == 'E\x00\x00 \x00\x00\x00\x00@\x00z\xdf\x00\x00\x00\x00\x00\x00\x00\x00Hello world!')


def test_frag():
    # Getters and setters for the fragmentation flag/offset properties.
    import ethernet
    s = "\x00\x23\x20\xd4\x2a\x8c\x00\x23\x20\xd4\x2a\x8c\x08\x00\x45\x00\x00\x54\x00\x00\x40\x00\x40\x01\x25\x8d\x0a\x00\x00\x8f\x0a\x00\x00\x8e\x08\x00\x2e\xa0\x01\xff\x23\x73\x20\x48\x4a\x4d\x00\x00\x00\x00\x78\x85\x02\x00\x00\x00\x00\x00\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37"
    ip = ethernet.Ethernet(s).ip
    assert (ip.rf == 0)
    assert (ip.df == 1)
    assert (ip.mf == 0)
    assert (ip.offset == 0)

    # test setters of fragmentation related attributes.
    ip.rf = 1
    ip.df = 0
    ip.mf = 1
    ip.offset = 1480
    assert (ip.rf == 1)
    assert (ip.df == 0)
    assert (ip.mf == 1)
    assert (ip.offset == 1480)


if __name__ == '__main__':
    test_ip()
    test_hl()
    test_opt()
    test_zerolen()
    test_constuctor()
    test_frag()
    print 'Tests Successful...'
# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from oslo_utils import encodeutils import six from nova import context from nova import exception from nova import test from nova.tests.unit.virt.libvirt import fakelibvirt from nova import utils from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host if sys.version_info > (3,): long = int class GuestTestCase(test.NoDBTestCase): def setUp(self): super(GuestTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.host = host.Host("qemu:///system") self.context = context.get_admin_context() self.domain = mock.Mock(spec=fakelibvirt.virDomain) self.guest = libvirt_guest.Guest(self.domain) def test_repr(self): self.domain.ID.return_value = 99 self.domain.UUIDString.return_value = "UUID" self.domain.name.return_value = "foo" self.assertEqual("<Guest 99 foo UUID>", repr(self.guest)) @mock.patch.object(fakelibvirt.Connection, 'defineXML') def test_create(self, mock_define): libvirt_guest.Guest.create("xml", self.host) mock_define.assert_called_once_with("xml") @mock.patch.object(fakelibvirt.Connection, 'defineXML') def test_create_exception(self, mock_define): mock_define.side_effect = test.TestingException self.assertRaises(test.TestingException, libvirt_guest.Guest.create, "foo", self.host) def 
test_launch(self): self.guest.launch() self.domain.createWithFlags.assert_called_once_with(0) def test_launch_and_pause(self): self.guest.launch(pause=True) self.domain.createWithFlags.assert_called_once_with( fakelibvirt.VIR_DOMAIN_START_PAUSED) def test_shutdown(self): self.domain.shutdown = mock.MagicMock() self.guest.shutdown() self.domain.shutdown.assert_called_once_with() @mock.patch.object(encodeutils, 'safe_decode') def test_launch_exception(self, mock_safe_decode): self.domain.createWithFlags.side_effect = test.TestingException mock_safe_decode.return_value = "</xml>" self.assertRaises(test.TestingException, self.guest.launch) self.assertEqual(1, mock_safe_decode.called) @mock.patch.object(utils, 'execute') @mock.patch.object(libvirt_guest.Guest, 'get_interfaces') def test_enable_hairpin(self, mock_get_interfaces, mock_execute): mock_get_interfaces.return_value = ["vnet0", "vnet1"] self.guest.enable_hairpin() mock_execute.assert_has_calls([ mock.call( 'tee', '/sys/class/net/vnet0/brport/hairpin_mode', run_as_root=True, process_input='1', check_exit_code=[0, 1]), mock.call( 'tee', '/sys/class/net/vnet1/brport/hairpin_mode', run_as_root=True, process_input='1', check_exit_code=[0, 1])]) @mock.patch.object(encodeutils, 'safe_decode') @mock.patch.object(utils, 'execute') @mock.patch.object(libvirt_guest.Guest, 'get_interfaces') def test_enable_hairpin_exception(self, mock_get_interfaces, mock_execute, mock_safe_decode): mock_get_interfaces.return_value = ["foo"] mock_execute.side_effect = test.TestingException('oops') self.assertRaises(test.TestingException, self.guest.enable_hairpin) self.assertEqual(1, mock_safe_decode.called) def test_get_interfaces(self): self.domain.XMLDesc.return_value = """<domain> <devices> <interface type="network"> <target dev="vnet0"/> </interface> <interface type="network"> <target dev="vnet1"/> </interface> </devices> </domain>""" self.assertEqual(["vnet0", "vnet1"], self.guest.get_interfaces()) def 
test_get_interfaces_exception(self): self.domain.XMLDesc.return_value = "<bad xml>" self.assertEqual([], self.guest.get_interfaces()) def test_poweroff(self): self.guest.poweroff() self.domain.destroy.assert_called_once_with() def test_resume(self): self.guest.resume() self.domain.resume.assert_called_once_with() @mock.patch('time.time', return_value=1234567890.125) def test_time_sync_no_errors(self, time_mock): self.domain.setTime.side_effect = fakelibvirt.libvirtError('error') self.guest.sync_guest_time() self.domain.setTime.assert_called_once_with(time={ 'nseconds': 125000000, 'seconds': 1234567890}) def test_get_vcpus_info(self): self.domain.vcpus.return_value = ([(0, 1, int(10290000000), 2)], [(True, True)]) vcpus = list(self.guest.get_vcpus_info()) self.assertEqual(0, vcpus[0].id) self.assertEqual(2, vcpus[0].cpu) self.assertEqual(1, vcpus[0].state) self.assertEqual(int(10290000000), vcpus[0].time) def test_delete_configuration(self): self.guest.delete_configuration() self.domain.undefineFlags.assert_called_once_with( fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE) def test_delete_configuration_with_nvram(self): self.guest.delete_configuration(support_uefi=True) self.domain.undefineFlags.assert_called_once_with( fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE | fakelibvirt.VIR_DOMAIN_UNDEFINE_NVRAM) def test_delete_configuration_exception(self): self.domain.undefineFlags.side_effect = fakelibvirt.libvirtError( 'oops') self.domain.ID.return_value = 1 self.guest.delete_configuration() self.domain.undefine.assert_called_once_with() def test_attach_device(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.attach_device(conf) self.domain.attachDeviceFlags.assert_called_once_with( "</xml>", flags=0) def test_attach_device_persistent(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.attach_device(conf, persistent=True) 
self.domain.attachDeviceFlags.assert_called_once_with( "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG) def test_attach_device_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.attach_device(conf, live=True) self.domain.attachDeviceFlags.assert_called_once_with( "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) def test_attach_device_persistent_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.attach_device(conf, persistent=True, live=True) self.domain.attachDeviceFlags.assert_called_once_with( "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_device(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.detach_device(conf) self.domain.detachDeviceFlags.assert_called_once_with( "</xml>", flags=0) def test_detach_device_persistent(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.detach_device(conf, persistent=True) self.domain.detachDeviceFlags.assert_called_once_with( "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG) def test_detach_device_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.detach_device(conf, live=True) self.domain.detachDeviceFlags.assert_called_once_with( "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) def test_detach_device_persistent_live(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) conf.to_xml.return_value = "</xml>" self.guest.detach_device(conf, persistent=True, live=True) self.domain.detachDeviceFlags.assert_called_once_with( "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_device_with_retry_from_transient_domain(self): conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice) 
        # (continuation of a detach-retry test whose ``def`` line is above
        # this chunk) The transient domain only gets a live detach; verify
        # the deferred retry callable issues exactly one more detach.
        conf.to_xml.return_value = "</xml>"
        get_config = mock.Mock()
        get_config.side_effect = [conf, conf, None]
        dev_path = "/dev/vdb"
        self.domain.isPersistent.return_value = False
        retry_detach = self.guest.detach_device_with_retry(
            get_config, dev_path, live=True, inc_sleep_time=.01)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        self.domain.detachDeviceFlags.reset_mock()
        retry_detach()
        self.assertEqual(1, self.domain.detachDeviceFlags.call_count)

    def test_detach_device_with_retry_detach_success(self):
        """Detach of a persistent domain eventually succeeds after retries."""
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        get_config = mock.Mock()
        # Force multiple retries of detach
        get_config.side_effect = [conf, conf, conf, None]
        dev_path = "/dev/vdb"
        self.domain.isPersistent.return_value = True
        retry_detach = self.guest.detach_device_with_retry(
            get_config, dev_path, live=True, inc_sleep_time=.01)
        # Ensure we've only done the initial detach call
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
        get_config.assert_called_with(dev_path)
        # Some time later, we can do the wait/retry to ensure detach succeeds
        self.domain.detachDeviceFlags.reset_mock()
        retry_detach()
        # Should have two retries before we pretend device is detached
        self.assertEqual(2, self.domain.detachDeviceFlags.call_count)

    def test_detach_device_with_retry_detach_failure(self):
        """Detach raises DeviceDetachFailed once max_retry_count is hit."""
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        # Continue to return some value for the disk config
        get_config = mock.Mock(return_value=conf)
        self.domain.isPersistent.return_value = True
        retry_detach = self.guest.detach_device_with_retry(
            get_config, "/dev/vdb", live=True,
            inc_sleep_time=.01, max_retry_count=3)
        # Ensure we've only done the initial detach call
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
        # Some time later, we can do the wait/retry to ensure detach
        self.domain.detachDeviceFlags.reset_mock()
        # Should hit max # of retries
        self.assertRaises(exception.DeviceDetachFailed, retry_detach)
        # initial call + 3 retries
        self.assertEqual(4, self.domain.detachDeviceFlags.call_count)

    def test_detach_device_with_retry_device_not_found(self):
        """DeviceNotFound is raised immediately when no config is found."""
        get_config = mock.Mock(return_value=None)
        self.domain.isPersistent.return_value = True
        ex = self.assertRaises(
            exception.DeviceNotFound, self.guest.detach_device_with_retry,
            get_config, "/dev/vdb", live=True)
        self.assertIn("/dev/vdb", six.text_type(ex))

    def test_detach_device_with_retry_device_not_found_alt_name(self):
        """Tests to make sure we use the alternative name in errors."""
        get_config = mock.Mock(return_value=None)
        self.domain.isPersistent.return_value = True
        ex = self.assertRaises(
            exception.DeviceNotFound, self.guest.detach_device_with_retry,
            get_config, mock.sentinel.device, live=True,
            alternative_device_name='foo')
        self.assertIn('foo', six.text_type(ex))

    @mock.patch.object(libvirt_guest.Guest, "detach_device")
    def test_detach_device_with_retry_operation_failed(self, mock_detach):
        # This simulates a retry of the transient/live domain detach
        # failing because the device is not found
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.domain.isPersistent.return_value = True
        get_config = mock.Mock(return_value=conf)
        fake_device = "vdb"
        fake_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="operation failed: disk vdb not found",
            error_code=fakelibvirt.VIR_ERR_OPERATION_FAILED,
            error_domain=fakelibvirt.VIR_FROM_DOMAIN)
        # First detach succeeds, the retry hits the libvirt error which
        # detach_device_with_retry must translate to DeviceNotFound.
        mock_detach.side_effect = [None, fake_exc]
        retry_detach = self.guest.detach_device_with_retry(
            get_config, fake_device, live=True,
            inc_sleep_time=.01, max_retry_count=3)
        # Some time later, we can do the wait/retry to ensure detach
        self.assertRaises(exception.DeviceNotFound, retry_detach)

    @mock.patch.object(libvirt_guest.Guest, "detach_device")
    def test_detach_device_with_retry_invalid_argument(self, mock_detach):
        # This simulates a persistent domain detach failing because
        # the device is not found
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.domain.isPersistent.return_value = True
        get_config = mock.Mock(return_value=conf)
        fake_device = "vdb"
        fake_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="invalid argument: no target device vdb",
            error_code=fakelibvirt.VIR_ERR_INVALID_ARG,
            error_domain=fakelibvirt.VIR_FROM_DOMAIN)
        mock_detach.side_effect = fake_exc
        self.assertRaises(exception.DeviceNotFound,
                          self.guest.detach_device_with_retry, get_config,
                          fake_device, live=True, inc_sleep_time=.01,
                          max_retry_count=3)

    # -- XMLDesc flag mapping ------------------------------------------

    def test_get_xml_desc(self):
        self.guest.get_xml_desc()
        self.domain.XMLDesc.assert_called_once_with(flags=0)

    def test_get_xml_desc_dump_inactive(self):
        self.guest.get_xml_desc(dump_inactive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_INACTIVE)

    def test_get_xml_desc_dump_sensitive(self):
        self.guest.get_xml_desc(dump_sensitive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_SECURE)

    def test_get_xml_desc_dump_inactive_dump_sensitive(self):
        self.guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                   fakelibvirt.VIR_DOMAIN_XML_SECURE))

    def test_get_xml_desc_dump_migratable(self):
        self.guest.get_xml_desc(dump_migratable=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)

    def test_has_persistent_configuration(self):
        self.assertTrue(
            self.guest.has_persistent_configuration())
        self.domain.isPersistent.assert_called_once_with()

    def test_save_memory_state(self):
        self.guest.save_memory_state()
        self.domain.managedSave.assert_called_once_with(0)

    def test_get_block_device(self):
        disk = 'vda'
        gblock = self.guest.get_block_device(disk)
        self.assertEqual(disk, gblock._disk)
        self.assertEqual(self.guest, gblock._guest)

    def test_set_user_password(self):
        self.guest.set_user_password("foo", "123")
        self.domain.setUserPassword.assert_called_once_with("foo", "123", 0)

    def test_get_config(self):
        xml = "<domain type='kvm'><name>fake</name></domain>"
        self.domain.XMLDesc.return_value = xml
        result = self.guest.get_config()
        self.assertIsInstance(result, vconfig.LibvirtConfigGuest)
        self.assertEqual('kvm', result.virt_type)
        self.assertEqual('fake', result.name)

    def test_get_devices(self):
        # Domain XML with 3 disks, 2 hostdevs and 1 interface; the
        # controllers/memballoon below are intentionally ignored by
        # get_all_devices.
        xml = """
<domain type='qemu'>
  <name>QEMUGuest1</name>
  <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
  <memory unit='KiB'>219136</memory>
  <currentMemory unit='KiB'>219136</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <os>
    <type arch='i686' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/bin/qemu</emulator>
    <disk type='block' device='disk'>
      <driver name='qemu' type='raw'/>
      <source dev='/dev/HostVG/QEMUGuest2'/>
      <target dev='hda' bus='ide'/>
      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw'/>
      <auth username='myname'>
        <secret type='iscsi' usage='mycluster_myname'/>
      </auth>
      <source protocol='iscsi' name='iqn.1992-01.com.example'>
        <host name='example.org' port='6000'/>
      </source>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw'/>
      <source protocol='iscsi' name='iqn.1992-01.com.example/1'>
        <host name='example.org' port='6000'/>
      </source>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x12' function='0x5'/>
      </source>
    </hostdev>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x12' function='0x6'/>
      </source>
    </hostdev>
    <interface type="bridge">
      <mac address="fa:16:3e:f9:af:ae"/>
      <model type="virtio"/>
      <driver name="qemu"/>
      <source bridge="qbr84008d03-11"/>
      <target dev="tap84008d03-11"/>
    </interface>
    <controller type='usb' index='0'/>
    <controller type='pci' index='0' model='pci-root'/>
    <memballoon model='none'/>
  </devices>
</domain>
"""
        self.domain.XMLDesc.return_value = xml

        devs = self.guest.get_all_devices()
        # Only currently parse <disk>, <hostdev> and <interface> elements
        # hence we're not counting the controller/memballoon
        self.assertEqual(6, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[3], vconfig.LibvirtConfigGuestHostdev)
        self.assertIsInstance(devs[4], vconfig.LibvirtConfigGuestHostdev)
        self.assertIsInstance(devs[5], vconfig.LibvirtConfigGuestInterface)

        devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(3, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)

        devs = self.guest.get_all_disks()
        self.assertEqual(3, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)

        devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestHostdev)
        self.assertEqual(2, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestHostdev)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestHostdev)

        devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestInterface)
        self.assertEqual(1, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestInterface)

        cfg = vconfig.LibvirtConfigGuestInterface()
        cfg.parse_str("""
            <interface type="bridge">
              <mac address="fa:16:3e:f9:af:ae"/>
              <model type="virtio"/>
              <driver name="qemu"/>
              <source bridge="qbr84008d03-11"/>
              <target dev="tap84008d03-11"/>
            </interface>""")
        self.assertIsNotNone(
            self.guest.get_interface_by_cfg(cfg))
        self.assertIsNone(self.guest.get_interface_by_cfg(None))

    def test_get_info(self):
        # virDomain.info() returns a 5-tuple:
        # (state, maxMem, memory, nrVirtCpu, cpuTime)
        self.domain.info.return_value = (1, 2, 3, 4, 5)
        self.domain.ID.return_value = 6
        info = self.guest.get_info(self.host)
        self.domain.info.assert_called_once_with()
        self.assertEqual(1, info.state)
        self.assertEqual(2, info.max_mem_kb)
        self.assertEqual(3, info.mem_kb)
        self.assertEqual(4, info.num_cpu)
        self.assertEqual(5, info.cpu_time_ns)
        self.assertEqual(6, info.id)

    def test_get_power_state(self):
        self.domain.info.return_value = (1, 2, 3, 4, 5)
        power = self.guest.get_power_state(self.host)
        self.assertEqual(1, power)

    def test_is_active_when_domain_is_active(self):
        with mock.patch.object(self.domain, "isActive", return_value=True):
            self.assertTrue(self.guest.is_active())

    def test_is_active_when_domain_not_active(self):
        with mock.patch.object(self.domain, "isActive", return_value=False):
            self.assertFalse(self.guest.is_active())

    def test_freeze_filesystems(self):
        self.guest.freeze_filesystems()
        self.domain.fsFreeze.assert_called_once_with()

    def test_thaw_filesystems(self):
        self.guest.thaw_filesystems()
        self.domain.fsThaw.assert_called_once_with()

    def _conf_snapshot(self):
        # Shared fixture: a snapshot-disk config that serializes to <disk/>.
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestSnapshotDisk)
        conf.to_xml.return_value = '<disk/>'
        return conf

    # NOTE(review): the snapshot tests below *call* the mocked
    # snapshotCreateXML instead of calling
    # snapshotCreateXML.assert_called_once_with(...), so they do not
    # actually assert the flags passed by Guest.snapshot — confirm and
    # tighten.

    def test_snapshot(self):
        conf = self._conf_snapshot()
        self.guest.snapshot(conf)
        self.domain.snapshotCreateXML('<disk/>', flags=0)
        conf.to_xml.assert_called_once_with()

    def test_snapshot_no_metadata(self):
        conf = self._conf_snapshot()
        self.guest.snapshot(conf, no_metadata=True)
        self.domain.snapshotCreateXML(
            '<disk/>',
            flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)
        conf.to_xml.assert_called_once_with()

    def test_snapshot_disk_only(self):
        conf = self._conf_snapshot()
        self.guest.snapshot(conf, disk_only=True)
        self.domain.snapshotCreateXML(
            '<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)
        conf.to_xml.assert_called_once_with()

    def test_snapshot_reuse_ext(self):
        conf = self._conf_snapshot()
        self.guest.snapshot(conf, reuse_ext=True)
        self.domain.snapshotCreateXML(
            '<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
        conf.to_xml.assert_called_once_with()

    def test_snapshot_quiesce(self):
        conf = self._conf_snapshot()
        self.guest.snapshot(conf, quiesce=True)
        self.domain.snapshotCreateXML(
            '<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
        conf.to_xml.assert_called_once_with()

    def test_snapshot_all(self):
        conf = self._conf_snapshot()
        self.guest.snapshot(conf, no_metadata=True, disk_only=True,
                            reuse_ext=True, quiesce=True)
        self.domain.snapshotCreateXML(
            '<disk/>', flags=(
                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT |
                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE))
        conf.to_xml.assert_called_once_with()

    def test_pause(self):
        self.guest.pause()
        self.domain.suspend.assert_called_once_with()

    def test_migrate_v1(self):
        self.guest.migrate('an-uri', flags=1, bandwidth=2)
        self.domain.migrateToURI.assert_called_once_with(
            'an-uri', flags=1, bandwidth=2)

    def test_migrate_v2(self):
        self.guest.migrate('an-uri', domain_xml='</xml>', flags=1,
                           bandwidth=2)
        self.domain.migrateToURI2.assert_called_once_with(
            'an-uri', miguri=None, dxml='</xml>', flags=1, bandwidth=2)

    def test_migrate_v3(self):
        self.guest.migrate('an-uri', domain_xml='</xml>',
                           params={'p1': 'v1'}, flags=1, bandwidth=2)
        self.domain.migrateToURI3.assert_called_once_with(
            'an-uri', flags=1, params={'p1': 'v1'})

    def test_abort_job(self):
        self.guest.abort_job()
        self.domain.abortJob.assert_called_once_with()

    def test_migrate_configure_max_downtime(self):
        self.guest.migrate_configure_max_downtime(1000)
        self.domain.migrateSetMaxDowntime.assert_called_once_with(1000)


class GuestBlockTestCase(test.NoDBTestCase):
    """Tests for the block-device wrapper returned by
    Guest.get_block_device: block job control, resize, rebase, commit
    and job-completion polling, all against a mocked virDomain.
    """

    def setUp(self):
        super(GuestBlockTestCase, self).setUp()

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.host = host.Host("qemu:///system")
        self.context = context.get_admin_context()

        self.domain = mock.Mock(spec=fakelibvirt.virDomain)
        self.guest = libvirt_guest.Guest(self.domain)
        self.gblock = self.guest.get_block_device('vda')

    def test_abort_job(self):
        self.gblock.abort_job()
        self.domain.blockJobAbort.assert_called_once_with('vda', flags=0)

    def test_abort_job_async(self):
        # NOTE(review): ``async`` is a reserved keyword on Python >= 3.7;
        # this line (and the abort_job parameter it exercises) only parses
        # on older interpreters.
        self.gblock.abort_job(async=True)
        self.domain.blockJobAbort.assert_called_once_with(
            'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)

    def test_abort_job_pivot(self):
        self.gblock.abort_job(pivot=True)
        self.domain.blockJobAbort.assert_called_once_with(
            'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)

    def test_get_job_info(self):
        self.domain.blockJobInfo.return_value = {
            "type": 1,
            "bandwidth": 18,
            "cur": 66,
            "end": 100}

        info = self.gblock.get_job_info()

        self.assertEqual(1, info.job)
        self.assertEqual(18, info.bandwidth)
        self.assertEqual(66, info.cur)
        self.assertEqual(100, info.end)
        self.domain.blockJobInfo.assert_called_once_with('vda', flags=0)

    def test_resize(self):
        self.gblock.resize(10)
        self.domain.blockResize.assert_called_once_with('vda', 10)

    def test_rebase(self):
        self.gblock.rebase("foo")
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0, flags=0)

    def test_rebase_shallow(self):
        self.gblock.rebase("foo", shallow=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)

    def test_rebase_reuse_ext(self):
        self.gblock.rebase("foo", reuse_ext=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)

    def test_rebase_copy(self):
        self.gblock.rebase("foo", copy=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY)

    def test_rebase_relative(self):
        self.gblock.rebase("foo", relative=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

    def test_rebase_copy_dev(self):
        self.gblock.rebase("foo", copy_dev=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV)

    def test_commit(self):
        self.gblock.commit("foo", "top")
        self.domain.blockCommit.assert_called_once_with(
            'vda', "foo", "top", 0, flags=0)

    def test_commit_relative(self):
        self.gblock.commit("foo", "top", relative=True)
        self.domain.blockCommit.assert_called_once_with(
            'vda', "foo", "top", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

    def test_is_job_complete_cur_end_zeros(self):
        # cur == end == 0 means the job hasn't started copying yet.
        self.domain.blockJobInfo.return_value = {
            "type": 4,
            "bandwidth": 18,
            "cur": 0,
            "end": 0}
        is_complete = self.gblock.is_job_complete()
        self.assertFalse(is_complete)

    def test_is_job_complete_current_lower_than_end(self):
        self.domain.blockJobInfo.return_value = {
            "type": 4,
            "bandwidth": 18,
            "cur": 95,
            "end": 100}
        is_complete = self.gblock.is_job_complete()
        self.assertFalse(is_complete)

    def test_is_job_complete_not_ready(self):
        # cur == end but the mirror element isn't marked ready yet.
        gblock = self.guest.get_block_device('vda')
        disk = vconfig.LibvirtConfigGuestDisk()
        disk.mirror = vconfig.LibvirtConfigGuestDiskMirror()
        with mock.patch.object(self.guest, 'get_disk', return_value=disk):
            self.domain.blockJobInfo.return_value = {
                "type": 4,
                "bandwidth": 18,
                "cur": 100,
                "end": 100}
            is_complete = gblock.is_job_complete()
            self.assertFalse(is_complete)

    def test_is_job_complete_ready(self):
        gblock = self.guest.get_block_device('vda')
        disk = vconfig.LibvirtConfigGuestDisk()
        disk.mirror = vconfig.LibvirtConfigGuestDiskMirror()
        disk.mirror.ready = 'yes'
        with mock.patch.object(self.guest, 'get_disk', return_value=disk):
            self.domain.blockJobInfo.return_value = {
                "type": 4,
                "bandwidth": 18,
                "cur": 100,
                "end": 100}
            is_complete = gblock.is_job_complete()
            self.assertTrue(is_complete)

    def test_is_job_complete_no_job(self):
        # An empty dict from blockJobInfo means no job is running.
        self.domain.blockJobInfo.return_value = {}
        is_complete = self.gblock.is_job_complete()
        self.assertTrue(is_complete)

    def test_is_job_complete_exception(self):
        self.domain.blockJobInfo.side_effect = fakelibvirt.libvirtError('fake')
        self.assertRaises(fakelibvirt.libvirtError,
                          self.gblock.is_job_complete)


class JobInfoTestCase(test.NoDBTestCase):
    """Tests for Guest.get_job_info: the jobStats/jobInfo fallback logic
    and translation of libvirt errors into an empty COMPLETED JobInfo.
    """

    def setUp(self):
        super(JobInfoTestCase, self).setUp()

        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        self.conn = fakelibvirt.openAuth("qemu:///system",
                                         [[], lambda: True])
        xml = ("<domain type='kvm'>"
               " <name>instance-0000000a</name>"
               "</domain>")
        self.dom = self.conn.createXML(xml, 0)
        self.guest = libvirt_guest.Guest(self.dom)
        # Reset the cached capability flag between tests.
        libvirt_guest.JobInfo._have_job_stats = True

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats(self, mock_stats, mock_info):
        mock_stats.return_value = {
            "type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            "memory_total": 75,
            "memory_processed": 50,
            "memory_remaining": 33,
            # Unknown keys from newer libvirt must be ignored, not fatal.
            "some_new_libvirt_stat_we_dont_know_about": 83
        }

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(0, info.disk_total)
        self.assertEqual(0, info.disk_processed)
        self.assertEqual(0, info.disk_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_no_support(self, mock_stats, mock_info):
        # jobStats unsupported -> fall back to the positional jobInfo list.
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.return_value = [
            fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(100, info.time_elapsed)
        self.assertEqual(99, info.time_remaining)
        self.assertEqual(10, info.data_total)
        self.assertEqual(11, info.data_processed)
        self.assertEqual(12, info.data_remaining)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(1, info.disk_total)
        self.assertEqual(2, info.disk_processed)
        self.assertEqual(3, info.disk_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_attr_error(self, mock_stats, mock_info):
        # Old python-libvirt bindings without jobStats at all.
        mock_stats.side_effect = AttributeError("No such API")

        mock_info.return_value = [
            fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(100, info.time_elapsed)
        self.assertEqual(99, info.time_remaining)
        self.assertEqual(10, info.data_total)
        self.assertEqual(11, info.data_processed)
        self.assertEqual(12, info.data_remaining)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(1, info.disk_total)
        self.assertEqual(2, info.disk_processed)
        self.assertEqual(3, info.disk_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats_no_domain(self, mock_stats, mock_info):
        # Domain already gone -> treated as a completed job, all zeros.
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain with UUID blah",
            fakelibvirt.VIR_ERR_NO_DOMAIN)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_no_domain(self, mock_stats, mock_info):
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain with UUID blah",
            fakelibvirt.VIR_ERR_NO_DOMAIN)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats_operation_invalid(self, mock_stats, mock_info):
        # Domain not running -> also treated as completed.
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Domain is not running",
            fakelibvirt.VIR_ERR_OPERATION_INVALID)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_operation_invalid(self, mock_stats, mock_info):
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Domain is not running",
            fakelibvirt.VIR_ERR_OPERATION_INVALID)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import JsonResponse
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.conf import settings
import django.contrib.auth
import django.views
import json
import numpy
import logging
import docutils
import docutils.core
import sheets_backend.sockets
import sheets_app.models as models
import sheets_app.book_demos

logger = logging.getLogger(__name__)

# Create your views here.


def process_rst(s):
    """Render an RST string to an HTML fragment (the <body> part only)."""
    return docutils.core.publish_parts(s, writer_name="html")['html_body']


def get_user_sheet_id(user, sheet_id):
    """Namespace a sheet id by the owning user's id."""
    return str(user.id) + '_' + sheet_id


def mypipeline(backend, strategy, details, response, user=None,
               *args, **kwargs):
    """social-auth pipeline step: store the provider profile image URL."""
    print('mypipline')
    print('backend ', backend)
    print('strategy', strategy)
    print('details ', details)
    print('response', response)
    print('user    ', user)
    user.profile_image_url = response['image'].get('url')
    user.save()


def cells_values(ret):
    """Return the sheet's cell values as a nested list of strings."""
    cells = ret.cells

    def f(c):
        return c.value
    return numpy.vectorize(f, otypes=[str])(cells).tolist()


def cells_array(ret):
    """Return cells as a nested list of JSON '[string, value]' pairs."""
    cells = ret.cells

    def f(c):
        v = c.value
        # if isinstance(v, str): v = "\"" + v + "\""
        return json.dumps([c.string, v])
    return numpy.vectorize(f, otypes=[str])(cells).tolist()


def login_redirect(url_next):
    """Redirect to the Google OAuth2 login, returning to url_next after."""
    return HttpResponseRedirect(
        reverse('social:begin', args=['google-oauth2', ]) +
        '?next=' + url_next)


def check_permission(request, book):
    """Return a redirect/forbidden response when access is denied.

    Returns None when the request may proceed (demo books are public;
    otherwise the authenticated creator only).
    """
    if not book.is_demo:
        if not request.user.is_authenticated():
            return login_redirect(reverse('index'))
        if not (request.user == book.user_creator):
            return HttpResponseForbidden("You shall not pass.")


def index(request, messages=None):
    """Landing page: list the current user's books.

    :param messages: optional list of SimpleMessage-like objects to show.
    """
    # NOTE: default changed from a shared mutable ``[]`` to None to avoid
    # the mutable-default-argument pitfall; behavior is unchanged.
    if messages is None:
        messages = []
    if not request.user.is_authenticated():
        return login_redirect(reverse('index'))
    user = django.contrib.auth.get_user(request)
    logger.debug('index')
    # Use lazy %-style args so logging formats correctly (passing extra
    # args without a placeholder raises TypeError at emit time).
    logger.debug('GET %s', list(request.GET.items()))
    if user.is_authenticated():
        books = list(user.book_user_creator.all())
    else:
        books = []
    context = {
        'user': user,
        'books': books,
        'url_login_redirect': reverse('index'),
        'url_logout_redirect': reverse('index'),
        'url_select_account_redirect': reverse('index'),
        'messages': messages
    }
    return render(request, 'sheets_app/index.html', context)


def book_demo(request, book_demo_name):
    """Create a fresh demo book from a named demo setup and open it."""
    cls = sheets_app.book_demos.get_func(book_demo_name)
    o = cls()
    book = book_new_func(book_demo_name)
    book.is_demo = True
    book.save()
    bp = sheets_backend.sockets.BookProxy(book.book_id,
                                          settings.WEB_SHEETS_PORT)
    o.setup(bp)
    return redirect('sheets:book', book.id)


class SimpleMessage(object):
    """A minimal (type, text) message for the index template."""

    def __init__(self, msgtype, msg):
        self.msgtype = msgtype
        self.msg = msg


class BookView(django.views.View):
    """Base view: load the Book, check access, build a BookProxy.

    Subclasses implement get_sub/post_sub(request, book, bp).
    """

    def post(self, request, book_id):
        return self.do_view(request, book_id, self.post_sub)

    def get(self, request, book_id):
        return self.do_view(request, book_id, self.get_sub)

    def do_view(self, request, book_id, sub_function):
        book = get_object_or_404(models.Book, pk=book_id)
        # Delegate to the shared permission helper instead of duplicating
        # its demo/auth/creator checks inline.
        denied = check_permission(request, book)
        if denied is not None:
            return denied
        bp = sheets_backend.sockets.BookProxy(book.book_id,
                                              settings.WEB_SHEETS_PORT)
        return sub_function(request, book, bp)


class BookViewView(BookView):
    """Render the spreadsheet page for a book's first sheet."""

    def get_sub(self, request, book, bp):
        user = django.contrib.auth.get_user(request)
        sheet_key = '0'
        ret = bp.get_sheet_data(sheet_key)
        print(ret)
        print(repr(ret.cells))
        cells = cells_array(ret)
        docs_html = process_rst(ret.docs)
        print('cells', repr(cells))
        print('docs', repr(ret.docs))
        print('docs_html', repr(docs_html))
        context = {
            'cells': json.dumps(cells),
            'script_pre': ret.script_pre,
            'script_pre_output': ret.script_pre_output,
            'script_post': ret.script_post,
            'script_post_output': ret.script_post_output,
            'docs_html': docs_html,
            'user': user,
            'book': book,
            'sheet_key': sheet_key,
            'url_login_redirect': reverse('index'),
            'url_logout_redirect': reverse('index'),
            'url_select_account_redirect': reverse('index'),
        }
        return render(request, 'sheets_app/sheet.html', context)


class SetCellView(BookView):
    """Set a single cell's source string and return the updated cells."""

    def post_sub(self, request, book, bp):
        sheet_key = request.POST["sheet_key"]
        r = int(request.POST['r'])
        c = int(request.POST['c'])
        s = request.POST['s']
        ret = bp.set_cell(sheet_key, r, c, s)
        # NOTE(review): sheet_data_response uses get_sheet_data here while
        # this and AlterSheetView use get_cell_data — confirm which the
        # backend expects.
        ret = bp.get_cell_data(sheet_key)
        cells = cells_array(ret)
        return JsonResponse({'cells': cells})


class ExceptionWithResponse(Exception):
    """Exception carrying an HttpResponse to return to the client."""

    def __init__(self, response):
        # NOTE(review): str(self) is evaluated before args are set, so this
        # passes an empty message; presumably str(response) was intended.
        super(ExceptionWithResponse, self).__init__(str(self))
        self.response = response


def sheet_data_response(bp, sheet_key):
    """Build the standard JSON payload describing a sheet's state."""
    ret = bp.get_sheet_data(sheet_key)
    cells = cells_array(ret)
    logger.debug('sheet_data')
    logger.debug('script_pre_output')
    logger.debug(ret.script_pre_output)
    return JsonResponse({
        'cells': cells,
        'script_pre': ret.script_pre,
        'script_pre_output': ret.script_pre_output,
        'script_post': ret.script_post,
        'script_post_output': ret.script_post_output,
    })


class GetSheetDataView(BookView):
    """Return the current sheet state as JSON."""

    def post_sub(self, request, book, bp):
        sheet_key = request.POST["sheet_key"]
        return sheet_data_response(bp, sheet_key)


class SetScriptPreView(BookView):
    """Update the pre-script and return the refreshed sheet state."""

    def post_sub(self, request, book, bp):
        sheet_key = request.POST["sheet_key"]
        s = request.POST['text']
        ret = bp.set_script_pre(s)
        return sheet_data_response(bp, sheet_key)


class SetScriptPostView(BookView):
    """Update the post-script and return only its output."""

    def post_sub(self, request, book, bp):
        sheet_key = request.POST["sheet_key"]
        s = request.POST['text']
        ret = bp.set_script_post(s)
        ret = bp.get_script_post_output()
        return JsonResponse({'script_post_output': ret.script_post_output})


class AlterSheetView(BookView):
    """Shared POST handler for row/column insertion (``func`` set by
    subclasses); ``i`` is the optional insertion index."""

    def post_sub(self, request, book, bp):
        if not request.POST['i']:
            i = None
        else:
            i = int(request.POST['i'])
        sheet_key = request.POST["sheet_key"]
        ret = self.func(bp, sheet_key, i)
        ret = bp.get_cell_data(sheet_key)
        cells = cells_array(ret)
        return JsonResponse({'cells': cells})


class AddColumnView(AlterSheetView):
    func = staticmethod(sheets_backend.sockets.BookProxy.add_column)


class AddRowView(AlterSheetView):
    func = staticmethod(sheets_backend.sockets.BookProxy.add_row)


def book_new_func(book_name, user=None):
    """Create a backend book and its DB record; return the Book model."""
    c = sheets_backend.sockets.Client(settings.WEB_SHEETS_PORT)
    ret = c.book_new()
    print('new book id', repr(ret.book_id), type(ret.book_id))
    b = models.Book()
    b.user_creator = user
    b.book_id = ret.book_id
    b.book_name = book_name
    b.is_demo = False
    b.save()
    return b


@login_required
def book_new(request):
    """Create a new book owned by the current user and open it."""
    book_name = request.POST['book_name']
    b = book_new_func(book_name, request.user)
    return redirect('sheets:book', b.id)
import logging
import json
from concurrent import futures
from contextlib import ExitStack

import requests

from .utils import timed

logger = logging.getLogger(__name__)

# Root of the official Hacker News Firebase REST API.
BASE_URL = 'https://hacker-news.firebaseio.com/v0/'


# exceptions

class HNException(Exception):
    """Base HN Exception.
    """


class HNNotFoundException(HNException):
    """Not found exception.
    """
    pass


# main class

class HN(object):
    """Represents a HN Firebase Client.

    Simplifies fetching information from the Hacker News API provided
    through Firebase.
    """

    @staticmethod
    def stories(base, page=1, page_size=50):
        """Fetches stories.

        Args:
            base (string): One of top, new, best, ask, show, or job.
            page (int): The page number used for pagination. Defaults to 1.
            page_size (int): The number of items to return per page.
                Defaults to 50.

        Returns:
            Stories: A Stories object.

        Raises:
            HNException: If invalid base is specified.
        """
        if base not in ('top', 'new', 'best', 'ask', 'show', 'job'):
            raise HNException('Invalid base specified.')

        url = BASE_URL + '{}stories.json'.format(base)
        request = requests.get(url)
        data = request.json()

        # paginate the data
        start_index = (page - 1) * page_size
        end_index = start_index + page_size
        data = data[start_index:end_index]

        return Stories(base, page, page_size, data)

    @staticmethod
    def item(id):
        """Fetches an item.

        Args:
            id (int): The id of an item.

        Returns:
            Item: An Item object.

        Raises:
            HNNotFoundException: If item is not found.
        """
        url = BASE_URL + 'item/{}.json'.format(id)
        request = requests.get(url)
        data = request.json()

        # Firebase returns null (-> None) for unknown ids.
        if not data:
            raise HNNotFoundException('Item {} not found.'.format(id))

        return Item(id, data)

    @staticmethod
    def user(username):
        """Fetches a user.

        Args:
            username (string): The username of a user.

        Returns:
            User: A User object.

        Raises:
            HNNotFoundException: If user is not found.
        """
        url = BASE_URL + 'user/{}.json'.format(username)
        request = requests.get(url)
        data = request.json()

        if not data:
            raise HNNotFoundException('User {} not found.'.format(username))

        return User(username, data)

    @staticmethod
    def updates():
        """Fetches recent updates.

        Returns:
            Updates: An updates object.
        """
        url = BASE_URL + 'updates.json'
        request = requests.get(url)
        data = request.json()

        return Updates(data)


class Stories(object):
    """Represents HN stories. Not meant to be used directly.

    Attributes:
        base (str): Indicates a HN stories base.
        page (int): Page number.
        page_size (int): Page size.
        data (dict): Original response from Firebase.
    """

    def __init__(self, base, page, page_size, data):
        """Inits Stories with base, page, page_size, and data."""
        self.base = base
        self.page = page
        self.page_size = page_size
        self.data = data

    def expanded(self, max_workers=None):
        """Fetches data for each individual stories item.

        Items are fetched concurrently using a thread pool.

        Args:
            max_workers (int): Number of workers to use for fetching.
                Defaults to None, meaning number of processors on the
                machine x 5.

        Returns:
            list: An expanded list of stories.
        """
        with ExitStack() as stack:
            exc = stack.enter_context(
                futures.ThreadPoolExecutor(max_workers=max_workers))
            # BUG FIX: the session is now registered with the ExitStack so
            # its connection pool is closed when fetching finishes.
            # Previously it was created bare and leaked; Item.comments
            # already managed its session this way.
            session = stack.enter_context(requests.Session())
            urls = {id: BASE_URL + 'item/{}.json'.format(id)
                    for id in self.data}
            futures_to_ids = {exc.submit(session.get, url): id
                              for id, url in urls.items()}
            # Iterating the dict preserves submission order; result()
            # blocks until each fetch completes.
            items = []
            for future in futures_to_ids:
                data = future.result().json()
                items.append(data)
            return items


class Item(object):
    """Represents HN item. Not meant to be used directly.

    Attributes:
        id (int): Item's id
        data (dict): Original response from Firebase.
    """

    def __init__(self, id, data):
        """Inits Item with id and data."""
        self.id = id
        self.data = data

    @staticmethod
    def _get_comments(kids, exc, session, depth):
        """Internal method to fetch comments recursively.

        Raises HNNotFoundException when there is nothing (further) to
        fetch; callers treat that as "no children here".
        """
        if not kids:
            raise HNNotFoundException('No comments found.')

        if depth is not None and depth < 1:
            raise HNNotFoundException('Comment depth reached')
        if depth is not None:
            depth -= 1

        urls = {id: BASE_URL + 'item/{}.json'.format(id) for id in kids}
        futures_to_ids = {exc.submit(session.get, url): id
                          for id, url in urls.items()}
        items = []
        for future in futures_to_ids:
            data = future.result().json()
            try:
                comments = Item._get_comments(
                    data.get('kids'), exc, session, depth)
                data['children'] = comments
            except HNException:
                # No kids or depth limit hit: this comment has no children.
                pass
            items.append(data)
        return items

    def comments(self, page=1, page_size=50, depth=None, max_workers=None):
        """Fetches item's comments.

        Fetching is done concurrently using multiple workers.

        Arguments:
            page (int): The page number used for pagination. Defaults to 1.
            page_size (int): The number of items to return per page.
                Defaults to 50.
            depth (int): The number for the depth for comments. For any
                given depth, a max of depth descendants will be returned.
            max_workers (int): Number of workers to use for fetching.
                Defaults to None, meaning number of processors on the
                machine x 5.

        Returns:
            list: A list of objects, each representing a comment. Each
                comment has a `children` key representing it's children, if
                there are any and if depth isn't exceeded.

        Raises:
            HNNotFoundException: If the item has no comments at all.
        """
        kids = self.data.get('kids')
        # NOTE(review): an item with no kids raises here, while an exhausted
        # page below returns [] — asymmetric but kept for compatibility.
        if not kids:
            raise HNNotFoundException('No comments found.')

        with ExitStack() as stack:
            exc = stack.enter_context(
                futures.ThreadPoolExecutor(max_workers=max_workers))
            session = stack.enter_context(requests.Session())

            # paginate the top-level comments only
            start_index = (page - 1) * page_size
            end_index = start_index + page_size
            kids = kids[start_index:end_index]

            try:
                items = Item._get_comments(kids, exc, session, depth)
            except HNNotFoundException:
                return []
            return items


class User(object):
    """Represents HN user. Not meant to be used directly.

    Attributes:
        username (str): User's username.
        data (dict): Original response from Firebase.
    """

    def __init__(self, username, data):
        """Inits User with username and data."""
        self.username = username
        self.data = data


class Updates(object):
    """Represents HN recent updates. Not meant to be used directly.

    Attributes:
        data (dict): Original response from Firebase; contains recently
            updated item ids and user profile ids.
    """

    def __init__(self, data):
        """Inits Updates with data."""
        self.data = data
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for melange.request.access."""

# NOTE: this module targets Python 2 (httplib, xrange).
import httplib
import unittest

from google.appengine.ext import ndb

from django import http

from melange.models import organization as ndb_org_model
from melange.models import profile as ndb_profile_model
from melange.request import access
from melange.request import exception

from soc.models import organization as org_model
from soc.models import profile as profile_model
from soc.models import program as program_model

from soc.views.helper import request_data

from soc.modules.seeder.logic.seeder import logic as seeder_logic

from tests import org_utils
from tests import profile_utils
from tests import program_utils
from tests import timeline_utils


class Explosive(object):
  """Raises an exception on any attribute access."""

  def __getattribute__(self, attribute_name):
    # Any attribute access blows up, proving a checker never inspected it.
    raise ValueError()


class NoneAllowedAccessChecker(access.AccessChecker):
  """Tests only implementation of access checker that grants access to
  nobody and always raises exception.Forbidden error.

  The exception will contain identifier of particular instance of this class
  so that callers can recognize objects that raised the exception after all.
  """

  def __init__(self, identifier):
    """Initializes a new instance of the access checker.

    Args:
      identifier: a string that identifies this checker.
    """
    self._identifier = identifier

  def checkAccess(self, data, check):
    """See access.AccessChecker.checkAccess for specification."""
    raise exception.Forbidden(message=self._identifier)


class EnsureLoggedInTest(unittest.TestCase):
  """Unit tests for ensureLoggedIn function."""

  def testForLoggedInUser(self):
    """Tests that no exception is raised for a logged-in user."""
    data = request_data.RequestData(None, None, {})
    data._gae_user = 'unused'
    access.ensureLoggedIn(data)

  def testForLoggedOutUser(self):
    """Tests that exception is raised for a non logged-in user."""
    data = request_data.RequestData(None, None, {})
    data._gae_user = None
    with self.assertRaises(exception.LoginRequired):
      access.ensureLoggedIn(data)


class EnsureLoggedOutTest(unittest.TestCase):
  """Unit tests for ensureLoggedOut function."""

  def testForLoggedInUser(self):
    """Tests that exception is raised for a logged-in user."""
    data = request_data.RequestData(http.HttpRequest(), None, {})
    data._gae_user = 'unused'
    with self.assertRaises(exception.Redirect):
      access.ensureLoggedOut(data)

  def testForLoggedOutUser(self):
    """Tests that no exception is raised for a non logged-in user."""
    data = request_data.RequestData(http.HttpRequest(), None, {})
    data._gae_user = None
    access.ensureLoggedOut(data)


class AllAllowedAccessCheckerTest(unittest.TestCase):
  """Tests the AllAllowedAccessChecker class."""

  def testAccessAllowedWithPhonyInputs(self):
    """Tests that access is allowed without examining inputs."""
    access_checker = access.AllAllowedAccessChecker()
    # Explosive raises on any attribute access, so passing means the
    # checker never touched its arguments.
    access_checker.checkAccess(Explosive(), Explosive())


class ProgramAdministratorAccessCheckerTest(unittest.TestCase):
  """Tests the ProgramAdministratorAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    self.sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=self.sponsor.key())

    # seed a user who will be tested for access
    self.user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(self.user)

    kwargs = {
        'sponsor': self.sponsor.key().name(),
        'program': self.program.program_id,
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testProgramAdministratorsAllowedAccess(self):
    """Tests that a program administrator is allowed access."""
    # make the user a program administrator
    self.user.host_for = [ndb.Key.from_old_key(self.program.key())]
    self.user.put()

    access_checker = access.ProgramAdministratorAccessChecker()
    access_checker.checkAccess(self.data, None)

  def testOrganizationAdministratorsDeniedAccess(self):
    """Tests that an organization administrator is denied access."""
    # seed a profile who is an organization admin
    org = org_utils.seedOrganization(self.program.key())
    profile_utils.seedNDBProfile(
        self.program.key(), user=self.user, admin_for=[org.key])

    access_checker = access.ProgramAdministratorAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_NOT_PROGRAM_ADMINISTRATOR)

  def testMentorDeniedAccess(self):
    """Tests that a mentor is denied access."""
    # seed a profile who is a mentor
    org = org_utils.seedOrganization(self.program.key())
    profile_utils.seedNDBProfile(
        self.program.key(), user=self.user, mentor_for=[org.key])

    access_checker = access.ProgramAdministratorAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_NOT_PROGRAM_ADMINISTRATOR)

  def testStudentDeniedAccess(self):
    """Tests that students are denied access."""
    # seed a profile who is a student
    profile_utils.seedNDBStudent(self.program, user=self.user)

    access_checker = access.ProgramAdministratorAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_NOT_PROGRAM_ADMINISTRATOR)

  def testAnonymousDeniedAccess(self):
    """Tests that logged-out users are denied access."""
    access_checker = access.ProgramAdministratorAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_NOT_PROGRAM_ADMINISTRATOR)


class DeveloperAccessCheckerTest(unittest.TestCase):
  """Tests the DeveloperAccessChecker class."""

  def testDeveloperAccessAllowed(self):
    data = request_data.RequestData(None, None, None)
    # TODO(nathaniel): Reaching around RequestHandler public API.
    data._is_developer = True

    access_checker = access.DeveloperAccessChecker()
    access_checker.checkAccess(data, None)

  def testNonDeveloperAccessDenied(self):
    data = request_data.RequestData(None, None, None)
    # TODO(nathaniel): Reaching around RequestHandler public API.
    data._is_developer = False

    access_checker = access.DeveloperAccessChecker()
    with self.assertRaises(exception.UserError):
      access_checker.checkAccess(data, None)


class ConjuctionAccessCheckerTest(unittest.TestCase):
  """Tests for ConjuctionAccessChecker class."""

  def testForAllPassingCheckers(self):
    """Tests that checker passes if all sub-checkers pass."""
    checkers = [access.AllAllowedAccessChecker() for _ in xrange(5)]
    access_checker = access.ConjuctionAccessChecker(checkers)
    access_checker.checkAccess(None, None)

  def testFirstCheckerFails(self):
    """Tests that checker fails if the first sub-checker fails."""
    checkers = [NoneAllowedAccessChecker('first')]
    checkers.extend([access.AllAllowedAccessChecker() for _ in xrange(4)])
    access_checker = access.ConjuctionAccessChecker(checkers)
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(None, None)
    self.assertEqual(context.exception.message, 'first')

  def testLastCheckerFails(self):
    """Tests that checker fails if the last sub-checker fails."""
    checkers = [access.AllAllowedAccessChecker() for _ in xrange(4)]
    checkers.append(NoneAllowedAccessChecker('last'))
    access_checker = access.ConjuctionAccessChecker(checkers)
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(None, None)
    self.assertEqual(context.exception.message, 'last')


class NonStudentUrlProfileAccessCheckerTest(unittest.TestCase):
  """Tests for NonStudentUrlProfileAccessChecker class."""

  def setUp(self):
    """See unittest.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=sponsor.key())
    self.kwargs = {
        'sponsor': sponsor.key().name(),
        'program': self.program.program_id,
        }

  def testUrlUserWithNoProfileAccessDenied(self):
    """Tests that access is denied for a user that does not have a profile."""
    self.kwargs['user'] = 'non_existing_user'
    data = request_data.RequestData(None, None, self.kwargs)

    access_checker = access.NonStudentUrlProfileAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(data, None)
    self.assertEqual(context.exception.status, httplib.NOT_FOUND)

  def testStudentAccessDenied(self):
    """Tests that access is denied for a user with a student profile."""
    # additionally, seed a profile who is not a student
    # access should be still denied as the check corresponds to URL profile
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedNDBProfile(self.program.key(), user=user)

    # seed URL profile who is a student
    url_profile = profile_utils.seedNDBStudent(self.program)
    self.kwargs['user'] = url_profile.profile_id

    data = request_data.RequestData(None, None, self.kwargs)

    access_checker = access.NonStudentUrlProfileAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_STUDENTS_DENIED)

  def testNonStudentAccessGranted(self):
    """Tests that access is granted for users with non-student accounts."""
    # seed URL profile who is not a student
    url_profile = profile_utils.seedNDBProfile(self.program.key())
    self.kwargs['user'] = url_profile.profile_id

    data = request_data.RequestData(None, None, self.kwargs)

    access_checker = access.NonStudentUrlProfileAccessChecker()
    access_checker.checkAccess(data, None)


class NonStudentProfileAccessCheckerTest(unittest.TestCase):
  """Tests for NonStudentProfileAccessChecker class."""

  def setUp(self):
    """See unittest.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=sponsor.key())
    # NOTE(review): this class keys 'program' off link_id while sibling
    # classes use program_id — confirm both resolve the same program.
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': self.program.link_id,
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testUserWithNoProfileAccessDenied(self):
    """Tests that access is denied if current user has no profile"""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)

    access_checker = access.NON_STUDENT_PROFILE_ACCESS_CHECKER
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testUserWithStudentProfileAccessDenied(self):
    """Tests that access is denied if current user has student profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedSOCStudent(self.program, user=user)

    access_checker = access.NON_STUDENT_PROFILE_ACCESS_CHECKER
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testUserWithNonStudentProfileAccessGranted(self):
    """Tests that access is granted if current user has non-student profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedNDBProfile(self.program.key(), user=user)

    access_checker = access.NON_STUDENT_PROFILE_ACCESS_CHECKER
    access_checker.checkAccess(self.data, None)


class ProgramActiveAccessCheckerTest(unittest.TestCase):
  """Tests for ProgramActiveAccessChecker class."""

  def setUp(self):
    """See unittest.setUp for specification."""
    self.program = seeder_logic.seed(program_model.Program)

  def testForNonExistingProgram(self):
    """Tests that access is denied if the program does not exist."""
    data = request_data.RequestData(None, None, None)
    data._program = None

    access_checker = access.ProgramActiveAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_PROGRAM_NOT_EXISTING)

  def testForNotActiveProgram(self):
    """Tests that access if denied if the program is not active."""
    data = request_data.RequestData(None, None, None)
    data._program = self.program
    data._timeline = request_data.TimelineHelper(self.program.timeline, None)

    access_checker = access.ProgramActiveAccessChecker()

    # the program is not visible
    data._program.status = program_model.STATUS_INVISIBLE
    data._program.timeline.program_start = timeline_utils.past()
    data._program.timeline.program_end = timeline_utils.future()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_PROGRAM_NOT_ACTIVE)

    # the program has already ended
    data._program.status = program_model.STATUS_VISIBLE
    data._program.timeline.program_start = timeline_utils.past(delta=100)
    data._program.timeline.program_end = timeline_utils.past(delta=50)
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(data, None)
    self.assertEqual(context.exception.message,
        access._MESSAGE_PROGRAM_NOT_ACTIVE)

  def testForActiveProgram(self):
    """Tests that access is granted if the program is active."""
    data = request_data.RequestData(None, None, None)
    data._program = self.program
    data._timeline = request_data.TimelineHelper(self.program.timeline, None)

    access_checker = access.ProgramActiveAccessChecker()

    # program is active and visible
    data._program.status = program_model.STATUS_VISIBLE
    data._program.timeline.program_start = timeline_utils.past()
    data._program.timeline.program_end = timeline_utils.future()
    access_checker.checkAccess(data, None)


class IsUrlUserAccessCheckerTest(unittest.TestCase):
  """Tests for IsUrlUserAccessChecker class."""

  def setUp(self):
    """See unittest.setUp for specification."""
    self.data = request_data.RequestData(None, None, {})
    self.data._ndb_user = profile_utils.seedNDBUser()

  def testForMissingUserData(self):
    """Tests for URL data that does not contain any user data."""
    access_checker = access.IsUrlUserAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.BAD_REQUEST)

  def testNonLoggedInUserAccessDenied(self):
    """Tests that exception is raised for a non logged-in user."""
    data = request_data.RequestData(None, None, {})
    data._gae_user = None
    data.kwargs['user'] = 'some_username'
    access_checker = access.IsUrlUserAccessChecker()
    with self.assertRaises(exception.LoginRequired):
      access_checker.checkAccess(data, None)

  def testNonUserAccessDenied(self):
    """Tests that access is denied for a user with no User entity."""
    self.data.kwargs['user'] = self.data._ndb_user.user_id
    self.data._ndb_user = None
    access_checker = access.IsUrlUserAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testOtherUserAccessDenied(self):
    """Tests that access is denied for a user who is not defined in URL."""
    self.data.kwargs['user'] = 'other'
    access_checker = access.IsUrlUserAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testSameUserAccessGranted(self):
    """Tests that access is granted for a user who is defined in URL."""
    self.data.kwargs['user'] = self.data._ndb_user.user_id
    access_checker = access.IsUrlUserAccessChecker()
    access_checker.checkAccess(self.data, None)


class IsUserOrgAdminForUrlOrgTest(unittest.TestCase):
  """Tests for IsUserOrgAdminForUrlOrg class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    program_properties = {
        'sponsor': sponsor,
        'scope': sponsor,
        }
    program = seeder_logic.seed(
        program_model.Program, properties=program_properties)
    org_properties = {
        'program': program,
        'scope': program,
        }
    self.organization = seeder_logic.seed(
        org_model.Organization, properties=org_properties)
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': program.link_id,
        'organization': self.organization.link_id,
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testNoProfileAccessDenied(self):
    """Tests that error is raised if profile does not exist."""
    self.data._profile = None
    access_checker = access.IsUserOrgAdminForUrlOrg()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)
    self.assertEqual(context.exception.message, access._MESSAGE_NO_PROFILE)

  def testForNonExistingOrg(self):
    """Tests that error is raised when organization does not exist."""
    profile_properties = {
        'is_org_admin': True,
        'org_admin_for': [self.organization.key()],
        'is_mentor': True,
        'mentor_for': [self.organization.key()],
        'is_student': False,
        }
    self.data._profile = seeder_logic.seed(
        profile_model.Profile, properties=profile_properties)
    self.data.kwargs['organization'] = 'non_existing_org_id'
    access_checker = access.IsUserOrgAdminForUrlOrg()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.NOT_FOUND)

  def testMentorAccessDenied(self):
    """Tests that a mentor is denied access."""
    profile_properties = {
        'is_org_admin': False,
        'org_admin_for': [],
        'is_mentor': True,
        'mentor_for': [self.organization.key()],
        'is_student': False,
        }
    self.data._profile = seeder_logic.seed(
        profile_model.Profile, properties=profile_properties)
    self.data._url_org = self.organization
    access_checker = access.IsUserOrgAdminForUrlOrg()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testStudentAccessDenied(self):
    """Tests that a student is denied access."""
    profile_properties = {
        'is_org_admin': False,
        'org_admin_for': [],
        'is_mentor': False,
        'mentor_for': [],
        'is_student': True,
        }
    self.data._profile = seeder_logic.seed(
        profile_model.Profile, properties=profile_properties)
    self.data._url_org = self.organization
    access_checker = access.IsUserOrgAdminForUrlOrg()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testOrgAdminAccessGranted(self):
    """Tests that an organization administrator is granted access."""
    profile_properties = {
        'is_org_admin': True,
        'org_admin_for': [self.organization.key()],
        'is_mentor': True,
        'mentor_for': [self.organization.key()],
        'is_student': False,
        }
    self.data._profile = seeder_logic.seed(
        profile_model.Profile, properties=profile_properties)
    self.data._url_org = self.organization
    access_checker = access.IsUserOrgAdminForUrlOrg()
    access_checker.checkAccess(self.data, None)


class HasProfileAccessCheckerTest(unittest.TestCase):
  """Unit tests for HasProfileAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    self.sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=self.sponsor.key())
    kwargs = {
        'sponsor': self.sponsor.key().name(),
        'program': self.program.program_id,
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testUserWithNoProfileAccessDenied(self):
    """Tests that access is denied if the user has no profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)

    access_checker = access.HAS_PROFILE_ACCESS_CHECKER
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testUserWithActiveProfileAccessGranted(self):
    """Tests that access is granted if the user has an active profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedNDBProfile(self.program.key(), user=user)

    access_checker = access.HAS_PROFILE_ACCESS_CHECKER
    access_checker.checkAccess(self.data, None)

  def testUserWithBannedProfileAccessDenied(self):
    """Tests that access is denied if the user has a banned profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedNDBProfile(
        self.program.key(), user=user, status=ndb_profile_model.Status.BANNED)

    access_checker = access.HAS_PROFILE_ACCESS_CHECKER
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testUserProfileForAnotherProgramAccessDenied(self):
    """Tests that access is denied if the profile is another program."""
    other_program = program_utils.seedProgram(sponsor_key=self.sponsor.key())
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedNDBProfile(other_program.key(), user=user)

    access_checker = access.HAS_PROFILE_ACCESS_CHECKER
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)


class UrlOrgStatusAccessCheckerTest(unittest.TestCase):
  """Unit tests for UrlOrgStatusAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    program = program_utils.seedProgram(sponsor_key=sponsor.key())
    self.org = org_utils.seedOrganization(program.key())
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': program.program_id,
        'organization': self.org.org_id,
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testAccessDeniedForInvalidStatus(self):
    """Tests that access is denied if the organization has an invalid status."""
    self.org.status = ndb_org_model.Status.REJECTED
    self.org.put()

    # the access checkers passes only for non-rejected organizations
    access_checker = access.UrlOrgStatusAccessChecker([
        ndb_org_model.Status.ACCEPTED, ndb_org_model.Status.PRE_ACCEPTED,
        ndb_org_model.Status.PRE_REJECTED])
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testAccessGrantedForValidStatus(self):
    """Tests that access is granted if the organization has a valid status."""
    self.org.status = ndb_org_model.Status.ACCEPTED
    self.org.put()

    # the access checkers passes only for non-rejected organizations
    access_checker = access.UrlOrgStatusAccessChecker([
        ndb_org_model.Status.ACCEPTED, ndb_org_model.Status.PRE_ACCEPTED,
        ndb_org_model.Status.PRE_REJECTED])
    access_checker.checkAccess(self.data, None)


class HasNoProfileAccessCheckerTest(unittest.TestCase):
  """Unit tests for HasNoProfileAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    self.sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=self.sponsor.key())
    kwargs = {
        'sponsor': self.sponsor.key().name(),
        'program': self.program.program_id
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testUserWithProfileAccessDenied(self):
    """Tests that access is denied for a user with a profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)
    profile_utils.seedNDBProfile(self.program.key(), user=user)

    access_checker = access.HasNoProfileAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testUserWithNoProfileAccessGranted(self):
    """Tests that access is granted for a user with no profile."""
    user = profile_utils.seedNDBUser()
    profile_utils.loginNDB(user)

    access_checker = access.HasNoProfileAccessChecker()
    access_checker.checkAccess(self.data, None)


class OrgsSignupStartedAccessCheckerTest(unittest.TestCase):
  """Unit tests for OrgsSignupStartedAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    program = program_utils.seedProgram(sponsor_key=sponsor.key())
    self.app_survey = program_utils.seedApplicationSurvey(program.key())
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': program.program_id
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testBeforeOrgSignupStartedAccessDenied(self):
    """Tests that access is denied before organization sign-up starts."""
    self.app_survey.survey_start = timeline_utils.future(delta=100)
    self.app_survey.survey_end = timeline_utils.future(delta=150)
    self.app_survey.put()

    access_checker = access.OrgSignupStartedAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testAfterOrgSignupStartedAccessGranted(self):
    """Tests that access is granted after organization sign-up starts."""
    self.app_survey.survey_start = timeline_utils.past()
    self.app_survey.survey_end = timeline_utils.future()
    self.app_survey.put()

    access_checker = access.OrgSignupStartedAccessChecker()
    access_checker.checkAccess(self.data, None)

  def testAfterOrgSignupEndedAccessGranted(self):
    """Tests that access is granted after organization sign-up ends."""
    # only the start matters for this checker; a finished sign-up still passes
    self.app_survey.survey_start = timeline_utils.past(delta=150)
    self.app_survey.survey_end = timeline_utils.past(delta=100)
    self.app_survey.put()

    access_checker = access.OrgSignupStartedAccessChecker()
    access_checker.checkAccess(self.data, None)


class OrgSignupActiveAccessCheckerTest(unittest.TestCase):
  """Unit tests for OrgSignupActiveAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    program = program_utils.seedProgram(sponsor_key=sponsor.key())
    self.app_survey = program_utils.seedApplicationSurvey(program.key())
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': program.program_id
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testBeforeOrgSignupAccessDenied(self):
    """Tests that access is denied before organization sign-up starts."""
    self.app_survey.survey_start = timeline_utils.future(delta=100)
    self.app_survey.survey_end = timeline_utils.future(delta=150)
    self.app_survey.put()

    access_checker = access.OrgSignupActiveAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testDuringOrgSignupAccessGranted(self):
    """Tests that access is granted during organization sign-up period."""
    self.app_survey.survey_start = timeline_utils.past()
    self.app_survey.survey_end = timeline_utils.future()
    self.app_survey.put()

    access_checker = access.OrgSignupActiveAccessChecker()
    access_checker.checkAccess(self.data, None)

  def testAfterOrgSignupAccessDenied(self):
    """Tests that access is denied after organization sign-up ends."""
    self.app_survey.survey_start = timeline_utils.past(delta=150)
    self.app_survey.survey_end = timeline_utils.past(delta=100)
    self.app_survey.put()

    access_checker = access.OrgSignupActiveAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)


class OrgsAnnouncedAccessCheckerTest(unittest.TestCase):
  """Unit tests for OrgsAnnouncedAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=sponsor.key())
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': self.program.program_id
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testBeforeOrgsAnnouncedAccessDenied(self):
    """Tests that access is denied before orgs are announced."""
    self.program.timeline.accepted_organization_announced_deadline = (
        timeline_utils.future())
    self.program.timeline.put()

    access_checker = access.OrgsAnnouncedAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testAfterOrgsAnnouncedAccessGranted(self):
    """Tests that access is granted after orgs are announced."""
    self.program.timeline.accepted_organization_announced_deadline = (
        timeline_utils.past())
    self.program.timeline.put()

    access_checker = access.OrgsAnnouncedAccessChecker()
    access_checker.checkAccess(self.data, None)


class StudentSignupActiveAccessCheckerTest(unittest.TestCase):
  """Unit tests for StudentSignupActiveAccessChecker class."""

  def setUp(self):
    """See unittest.TestCase.setUp for specification."""
    sponsor = program_utils.seedSponsor()
    self.program = program_utils.seedProgram(sponsor_key=sponsor.key())
    kwargs = {
        'sponsor': sponsor.key().name(),
        'program': self.program.program_id
        }
    self.data = request_data.RequestData(None, None, kwargs)

  def testBeforeStudentSignupAccessDenied(self):
    """Tests that access is denied before student sign-up period."""
    self.program.timeline.student_signup_start = timeline_utils.future(delta=10)
    self.program.timeline.student_signup_end = timeline_utils.future(delta=20)
    self.program.timeline.put()

    access_checker = access.StudentSignupActiveAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testAfterStudentSignupAccessDenied(self):
    """Tests that access is denied after student sign-up period."""
    self.program.timeline.student_signup_start = timeline_utils.past(delta=20)
    self.program.timeline.student_signup_end = timeline_utils.past(delta=10)
    self.program.timeline.put()

    access_checker = access.StudentSignupActiveAccessChecker()
    with self.assertRaises(exception.UserError) as context:
      access_checker.checkAccess(self.data, None)
    self.assertEqual(context.exception.status, httplib.FORBIDDEN)

  def testDuringStudentSignupAccessGranted(self):
    """Tests that access is granted during student sign-up period."""
    self.program.timeline.student_signup_start = timeline_utils.past(delta=10)
    self.program.timeline.student_signup_end = timeline_utils.future(delta=10)
    self.program.timeline.put()

    access_checker = access.StudentSignupActiveAccessChecker()
    access_checker.checkAccess(self.data, None)
# -*- coding: utf-8 -*-
"""
Test command line parsing and actions
"""
import mock

from orcoursetrion.cmd import execute
from orcoursetrion.tests.base import TestGithubBase


class TestGithubCommand(TestGithubBase):
    """Verify github commands via command line"""

    def _run_command(self, args):
        """Run ``execute`` with ``args`` substituted for ``sys.argv``.

        The ``orcoursetrion.cmd.actions`` module is patched for the
        duration of the call, and the resulting mock is returned so the
        caller can assert on which action was invoked and with what
        arguments.
        """
        with mock.patch('sys.argv', args):
            with mock.patch('orcoursetrion.cmd.actions') as mocked_actions:
                execute()
        return mocked_actions

    def test_cmd_create_export_repo(self):
        """Command line test of create_export_repo"""
        args = [
            'orcoursetrion', 'create_export_repo',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
            '-d', self.TEST_DESCRIPTION,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.create_export_repo.called)
        mocked_actions.create_export_repo.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
            self.TEST_DESCRIPTION
        )

    def test_cmd_rerun_studio(self):
        """Command line test of rerun_studio"""
        args = [
            'orcoursetrion', 'rerun_studio',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
            '-n', self.TEST_NEW_TERM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.rerun_studio.called)
        mocked_actions.rerun_studio.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
            self.TEST_NEW_TERM
        )

    def test_cmd_release_studio(self):
        """Command line test of release_studio"""
        args = [
            'orcoursetrion', 'release_studio',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.release_studio.called)
        mocked_actions.release_studio.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
        )

    def test_cmd_create_xml_repo(self):
        """Command line test of create_xml_repo"""
        # Fixed docstring: previously claimed "create_export_repo".
        # Case 1: team given, no members -> members argument is None.
        args = [
            'orcoursetrion', 'create_xml_repo',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
            '-d', self.TEST_DESCRIPTION,
            '-g', self.TEST_TEAM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.create_xml_repo.called)
        mocked_actions.create_xml_repo.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
            self.TEST_TEAM,
            None,
            self.TEST_DESCRIPTION
        )

        # Case 2: both team and an explicit member list.
        args = [
            'orcoursetrion', 'create_xml_repo',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
            '-d', self.TEST_DESCRIPTION,
            '-m', 'archlight', 'dreadnought',
            '-g', self.TEST_TEAM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.create_xml_repo.called)
        mocked_actions.create_xml_repo.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
            self.TEST_TEAM,
            ['archlight', 'dreadnought'],
            self.TEST_DESCRIPTION
        )

        # Case 3: members only, no team -> team argument is None.
        args = [
            'orcoursetrion', 'create_xml_repo',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
            '-d', self.TEST_DESCRIPTION,
            '-m', 'archlight', 'dreadnought',
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.create_xml_repo.called)
        mocked_actions.create_xml_repo.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
            None,
            ['archlight', 'dreadnought'],
            self.TEST_DESCRIPTION
        )

    def test_cmd_rerun_xml(self):
        """Command line test of rerun_xml"""
        args = [
            'orcoursetrion', 'rerun_xml',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.rerun_xml.called)
        mocked_actions.rerun_xml.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
        )

    def test_cmd_release_xml(self):
        """Command line test of release_xml"""
        args = [
            'orcoursetrion', 'release_xml',
            '-c', self.TEST_COURSE,
            '-t', self.TEST_TERM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.release_xml.called)
        mocked_actions.release_xml.assert_called_with(
            self.TEST_COURSE,
            self.TEST_TERM,
        )

    def test_cmd_put_team(self):
        """Command line test of put_team"""
        # Fixed docstring: previously claimed "create_export_repo".
        # Case 1: no -r flag and no members -> (read_only=False, members=None).
        args = [
            'orcoursetrion', 'put_team',
            '-o', self.ORG,
            '-g', self.TEST_TEAM,
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.put_team.called)
        mocked_actions.put_team.assert_called_with(
            self.ORG,
            self.TEST_TEAM,
            False,
            None
        )

        # Case 2: -r flag plus an explicit member list.
        args = [
            'orcoursetrion', 'put_team',
            '-o', self.ORG,
            '-r',
            '-g', self.TEST_TEAM,
            '-m', 'bizarnage', 'chemistro',
        ]
        mocked_actions = self._run_command(args)
        self.assertTrue(mocked_actions.put_team.called)
        mocked_actions.put_team.assert_called_with(
            self.ORG,
            self.TEST_TEAM,
            True,
            ['bizarnage', 'chemistro']
        )
# Copyright (c) 2011-2013 Turbulenz Limited
"""Implementation of the WebSocket protocol.

`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.

.. warning::

   The WebSocket protocol was recently finalized as `RFC 6455
   <http://tools.ietf.org/html/rfc6455>`_ and is not yet supported in all
   browsers.  Refer to http://caniuse.com/websockets for details on
   compatibility.  In addition, during development the protocol went through
   several incompatible versions, and some browsers only support older
   versions.  By default this module only supports the latest version of the
   protocol, but optional support for an older version (known as "draft 76"
   or "hixie-76") can be enabled by overriding `WebSocketHandler.allow_draft76`
   (see that method's documentation for caveats).
"""

from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010

# All modifications:
# Copyright (c) 2011-2013 Turbulenz Limited

# pylint: disable=W0301,W0404,R0201,W0201,R0904,W0212,W0703,W0141,E1101,W0108,R0921,C0321

import array
import base64
import collections
import functools
import hashlib
import os
import struct
import time

import tornado.escape
import tornado.web
from tornado.concurrent import Future
from tornado.escape import utf8, native_str
from tornado import httpclient
from tornado.ioloop import IOLoop
from tornado.log import gen_log, app_log
from tornado.netutil import Resolver
from tornado import simple_httpclient
from tornado.util import bytes_type, unicode_type

# Py2/py3 compatibility shim: expose ``xrange`` on both interpreters.
try:
    xrange  # py2
except NameError:
    xrange = range  # py3


class WebSocketHandler(tornado.web.RequestHandler):
    """Subclass this class to create a basic WebSocket handler.

    Override `on_message` to handle incoming messages, and use
    `write_message` to send messages to the client. You can also
    override `open` and `on_close` to handle opened and closed
    connections.

    See http://dev.w3.org/html5/websockets/ for details on the
    JavaScript interface.  The protocol is specified at
    http://tools.ietf.org/html/rfc6455.

    Here is an example WebSocket handler that echos back all received messages
    back to the client::

      class EchoWebSocket(websocket.WebSocketHandler):
          def open(self):
              print "WebSocket opened"

          def on_message(self, message):
              self.write_message(u"You said: " + message)

          def on_close(self):
              print "WebSocket closed"

    WebSockets are not standard HTTP connections. The "handshake" is HTTP,
    but after the handshake, the protocol is message-based. Consequently,
    most of the Tornado HTTP facilities are not available in handlers of this
    type. The only communication methods available to you are `write_message()`,
    `ping()`, and `close()`. Likewise, your request handler class should
    implement `open()` method rather than ``get()`` or ``post()``.

    If you map the handler above to ``/websocket`` in your application, you can
    invoke it in JavaScript with::

      var ws = new WebSocket("ws://localhost:8888/websocket");
      ws.onopen = function() {
         ws.send("Hello, world");
      };
      ws.onmessage = function (evt) {
         alert(evt.data);
      };

    This script pops up an alert box that says "You said: Hello, world".
    """
    def __init__(self, application, request, **kwargs):
        tornado.web.RequestHandler.__init__(self, application, request,
                                            **kwargs)
        # Take over the raw IOStream; after the handshake this connection no
        # longer speaks plain HTTP.
        self.stream = request.connection.stream
        self.ws_connection = None

    def _execute(self, transforms, *args, **kwargs):
        # Entry point called by tornado.web in place of get()/post():
        # validates the HTTP upgrade request, then hands the stream over to
        # the appropriate protocol implementation.
        self.open_args = args
        self.open_kwargs = kwargs

        # Websocket only supports GET method
        if self.request.method != 'GET':
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 405 Method Not Allowed\r\n\r\n"
            ))
            self.stream.close()
            return

        # Upgrade header should be present and should be equal to WebSocket
        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 400 Bad Request\r\n\r\n"
                "Can \"Upgrade\" only to \"WebSocket\"."
            ))
            self.stream.close()
            return

        # Connection header should be upgrade. Some proxy servers/load balancers
        # might mess with it.
        headers = self.request.headers
        connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
        if 'upgrade' not in connection:
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 400 Bad Request\r\n\r\n"
                "\"Connection\" must be \"Upgrade\"."
            ))
            self.stream.close()
            return

        # The difference between version 8 and 13 is that in 8 the
        # client sends a "Sec-Websocket-Origin" header and in 13 it's
        # simply "Origin".
        if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
            self.ws_connection = WebSocketProtocol13(self)
            self.ws_connection.accept_connection()
        elif (self.allow_draft76() and
              "Sec-WebSocket-Version" not in self.request.headers):
            # Draft-76 clients send no version header at all.
            self.ws_connection = WebSocketProtocol76(self)
            self.ws_connection.accept_connection()
        else:
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 426 Upgrade Required\r\n"
                "Sec-WebSocket-Version: 8\r\n\r\n"))
            self.stream.close()

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket.

        The message may be either a string or a dict (which will be
        encoded as json).  If the ``binary`` argument is false, the
        message will be sent as utf8; in binary mode any byte string
        is allowed.
        """
        if isinstance(message, dict):
            message = tornado.escape.json_encode(message)
        self.ws_connection.write_message(message, binary=binary)

    def select_subprotocol(self, subprotocols):
        """Invoked when a new WebSocket requests specific subprotocols.

        ``subprotocols`` is a list of strings identifying the
        subprotocols proposed by the client.  This method may be
        overridden to return one of those strings to select it, or
        ``None`` to not select a subprotocol.  Failure to select a
        subprotocol does not automatically abort the connection,
        although clients may close the connection if none of their
        proposed subprotocols was selected.
        """
        return None

    def open(self):
        """Invoked when a new WebSocket is opened.

        The arguments to `open` are extracted from the `tornado.web.URLSpec`
        regular expression, just like the arguments to
        `tornado.web.RequestHandler.get`.
        """
        pass

    def on_message(self, message):
        """Handle incoming messages on the WebSocket

        This method must be overridden.
        """
        raise NotImplementedError

    def ping(self, data):
        """Send ping frame to the remote end."""
        self.ws_connection.write_ping(data)

    def on_pong(self, data):
        """Invoked when the response to a ping frame is received."""
        pass

    def on_close(self):
        """Invoked when the WebSocket is closed."""
        pass

    def close(self):
        """Closes this Web Socket.

        Once the close handshake is successful the socket will be closed.
        """
        self.ws_connection.close()

    def allow_draft76(self):
        """Override to enable support for the older "draft76" protocol.

        The draft76 version of the websocket protocol is disabled by
        default due to security concerns, but it can be enabled by
        overriding this method to return True.

        Connections using the draft76 protocol do not support the
        ``binary=True`` flag to `write_message`.

        Support for the draft76 protocol is deprecated and will be
        removed in a future version of Tornado.
        """
        return False

    def get_websocket_scheme(self):
        """Return the url scheme used for this request, either "ws" or "wss".

        This is normally decided by HTTPServer, but applications
        may wish to override this if they are using an SSL proxy
        that does not provide the X-Scheme header as understood
        by HTTPServer.

        Note that this is only used by the draft76 protocol.
        """
        return "wss" if self.request.protocol == "https" else "ws"

    def async_callback(self, callback, *args, **kwargs):
        """Obsolete - catches exceptions from the wrapped function.

        This function is normally unncecessary thanks to
        `tornado.stack_context`.
        """
        return self.ws_connection.async_callback(callback, *args, **kwargs)

    def _not_supported(self, *args, **kwargs):
        # Stub installed over RequestHandler methods that make no sense once
        # the connection has been upgraded (see the setattr loop below).
        raise Exception("Method not supported for Web Sockets")

    def on_connection_close(self):
        if self.ws_connection:
            self.ws_connection.on_connection_close()
            self.ws_connection = None
            self.on_close()


# Regular HTTP response machinery is unavailable after the upgrade; replace
# those RequestHandler methods with a stub that raises.
for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
               "set_status", "flush", "finish"]:
    setattr(WebSocketHandler, method, WebSocketHandler._not_supported)


class WebSocketProtocol(object):
    """Base class for WebSocket protocol versions.
    """
    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.stream = handler.stream
        self.client_terminated = False
        self.server_terminated = False

    def async_callback(self, callback, *args, **kwargs):
        """Wrap callbacks with this if they are used on asynchronous requests.

        Catches exceptions properly and closes this WebSocket if an exception
        is uncaught.
        """
        if args or kwargs:
            callback = functools.partial(callback, *args, **kwargs)

        def wrapper(*args, **kwargs):
            try:
                return callback(*args, **kwargs)
            except Exception:
                app_log.error("Uncaught exception in %s",
                              self.request.path, exc_info=True)
                self._abort()
        return wrapper

    def on_connection_close(self):
        self._abort()

    def _abort(self):
        """Instantly aborts the WebSocket connection by closing the socket"""
        self.client_terminated = True
        self.server_terminated = True
        self.stream.close()  # forcibly tear down the connection
        self.close()  # let the subclass cleanup


class WebSocketProtocol76(WebSocketProtocol):
    """Implementation of the WebSockets protocol, version hixie-76.

    This class provides basic functionality to process WebSockets requests as
    specified in
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
    """
    def __init__(self, handler):
        WebSocketProtocol.__init__(self, handler)
        self.challenge = None
        self._waiting = None

    def accept_connection(self):
        try:
            self._handle_websocket_headers()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received")
            self._abort()
            return

        scheme = self.handler.get_websocket_scheme()

        # draft76 only allows a single subprotocol
        subprotocol_header = ''
        subprotocol = self.request.headers.get("Sec-WebSocket-Protocol", None)
        if subprotocol:
            selected = self.handler.select_subprotocol([subprotocol])
            if selected:
                assert selected == subprotocol
                subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected

        # Write the initial headers before attempting to read the challenge.
        # This is necessary when using proxies (such as HAProxy), which
        # need to see the Upgrade headers before passing through the
        # non-HTTP traffic that follows.
        self.stream.write(tornado.escape.utf8(
            "HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
            "Upgrade: WebSocket\r\n"
            "Connection: Upgrade\r\n"
            "Server: TornadoServer/%(version)s\r\n"
            "Sec-WebSocket-Origin: %(origin)s\r\n"
            "Sec-WebSocket-Location: %(scheme)s://%(host)s%(uri)s\r\n"
            "%(subprotocol)s"
            "\r\n" % (dict(
                version=tornado.version,
                origin=self.request.headers["Origin"],
                scheme=scheme,
                host=self.request.host,
                uri=self.request.uri,
                subprotocol=subprotocol_header))))
        # The draft76 handshake ends with an 8-byte challenge from the client.
        self.stream.read_bytes(8, self._handle_challenge)

    def challenge_response(self, challenge):
        """Generates the challenge response that's needed in the handshake

        The challenge parameter should be the raw bytes as sent from the
        client.
        """
        key_1 = self.request.headers.get("Sec-Websocket-Key1")
        key_2 = self.request.headers.get("Sec-Websocket-Key2")
        try:
            part_1 = self._calculate_part(key_1)
            part_2 = self._calculate_part(key_2)
        except ValueError:
            raise ValueError("Invalid Keys/Challenge")
        return self._generate_challenge_response(part_1, part_2, challenge)

    def _handle_challenge(self, challenge):
        try:
            challenge_response = self.challenge_response(challenge)
        except ValueError:
            gen_log.debug("Malformed key data in WebSocket request")
            self._abort()
            return
        self._write_response(challenge_response)

    def _write_response(self, challenge):
        self.stream.write(challenge)
        self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs)
        self._receive_message()

    def _handle_websocket_headers(self):
        """Verifies all invariant- and required headers

        If a header is missing or have an incorrect value ValueError will be
        raised
        """
        fields = ("Origin", "Host", "Sec-Websocket-Key1",
                  "Sec-Websocket-Key2")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")

    def _calculate_part(self, key):
        """Processes the key headers and calculates their key value.

        Raises ValueError when feed invalid key."""
        # pyflakes complains about variable reuse if both of these lines use 'c'
        number = int(''.join(c for c in key if c.isdigit()))
        spaces = len([c2 for c2 in key if c2.isspace()])
        try:
            key_number = number // spaces
        except (ValueError, ZeroDivisionError):
            raise ValueError
        return struct.pack(">I", key_number)

    def _generate_challenge_response(self, part_1, part_2, part_3):
        # Response digest per draft76: md5(key1_part + key2_part + challenge).
        m = hashlib.md5()
        m.update(part_1)
        m.update(part_2)
        m.update(part_3)
        return m.digest()

    def _receive_message(self):
        self.stream.read_bytes(1, self._on_frame_type)

    def _on_frame_type(self, byte):
        frame_type = ord(byte)
        if frame_type == 0x00:
            # 0x00 starts a text frame, terminated by 0xff.
            self.stream.read_until(b"\xff", self._on_end_delimiter)
        elif frame_type == 0xff:
            # 0xff introduces the closing handshake.
            self.stream.read_bytes(1, self._on_length_indicator)
        else:
            self._abort()

    def _on_end_delimiter(self, frame):
        if not self.client_terminated:
            self.async_callback(self.handler.on_message)(
                frame[:-1].decode("utf-8", "replace"))
        if not self.client_terminated:
            self._receive_message()

    def _on_length_indicator(self, byte):
        if ord(byte) != 0x00:
            self._abort()
            return
        self.client_terminated = True
        self.close()

    def create_frame(self, message):
        """Creates a frame from the given text message."""
        return b"\x00" + message + b"\xff"

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket."""
        if binary:
            raise ValueError(
                "Binary messages not supported by this version of websockets")
        if isinstance(message, unicode_type):
            message = message.encode("utf-8")
        assert isinstance(message, bytes_type)
        self.stream.write(b"\x00" + message + b"\xff")

    def write_ping(self, data):
        """Send ping frame."""
        raise ValueError("Ping messages not supported by this version of websockets")

    def close(self):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                # NOTE(review): this writes a ``str`` literal, unlike the
                # ``bytes`` used elsewhere — presumably only exercised under
                # py2, where str is bytes; would fail on py3. Confirm before
                # enabling draft76 support on py3.
                self.stream.write("\xff\x00")
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            # Give the client a few seconds to finish closing, then abort.
            self._waiting = self.stream.io_loop.add_timeout(
                time.time() + 5, self._abort)


class WebSocketProtocol13(WebSocketProtocol):
    """Implementation of the WebSocket protocol from RFC 6455.

    This class supports versions 7 and 8 of the protocol in addition to the
    final version 13.
    """
    def __init__(self, handler, mask_outgoing=False):
        WebSocketProtocol.__init__(self, handler)
        # Clients must mask outgoing frames; servers must not (RFC 6455 §5.1).
        self.mask_outgoing = mask_outgoing
        self._final_frame = False
        self._frame_opcode = None
        self._masked_frame = None
        self._frame_mask = None
        self._frame_length = None
        self._fragmented_message_buffer = None
        self._fragmented_message_opcode = None
        self._waiting = None

    def accept_connection(self):
        try:
            self._handle_websocket_headers()
            self._accept_connection()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received", exc_info=True)
            self._abort()
            return

    def _handle_websocket_headers(self):
        """Verifies all invariant- and required headers

        If a header is missing or have an incorrect value ValueError will be
        raised
        """
        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")

    @staticmethod
    def compute_accept_value(key):
        """Computes the value for the Sec-WebSocket-Accept header,
        given the value for Sec-WebSocket-Key.
        """
        sha1 = hashlib.sha1()
        sha1.update(utf8(key))
        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
        return native_str(base64.b64encode(sha1.digest()))

    def _challenge_response(self):
        return WebSocketProtocol13.compute_accept_value(
            self.request.headers.get("Sec-Websocket-Key"))

    def _accept_connection(self):
        subprotocol_header = ''
        subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
        subprotocols = [s.strip() for s in subprotocols.split(',')]
        if subprotocols:
            selected = self.handler.select_subprotocol(subprotocols)
            if selected:
                assert selected in subprotocols
                subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected

        self.stream.write(tornado.escape.utf8(
            "HTTP/1.1 101 Switching Protocols\r\n"
            "Upgrade: websocket\r\n"
            "Connection: Upgrade\r\n"
            "Sec-WebSocket-Accept: %s\r\n"
            "%s"
            "\r\n" % (self._challenge_response(), subprotocol_header)))

        self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs)
        self._receive_frame()

    def _write_frame(self, fin, opcode, data):
        # Serialize one frame: FIN bit + opcode, then a 7/16/64-bit length
        # encoding, then (optionally masked) payload.
        if fin:
            finbit = 0x80
        else:
            finbit = 0
        frame = struct.pack("B", finbit | opcode)
        l = len(data)
        if self.mask_outgoing:
            mask_bit = 0x80
        else:
            mask_bit = 0
        if l < 126:
            frame += struct.pack("B", l | mask_bit)
        elif l <= 0xFFFF:
            frame += struct.pack("!BH", 126 | mask_bit, l)
        else:
            frame += struct.pack("!BQ", 127 | mask_bit, l)
        if self.mask_outgoing:
            mask = os.urandom(4)
            data = mask + self._apply_mask(mask, data)
        frame += data
        self.stream.write(frame)

    def create_frame(self, message):
        """Creates a frame from the given text message."""
        frame = b'\x81'  # FIN + text opcode; never masked
        l = len(message)
        if l < 126:
            frame += struct.pack("B", l)
        elif l <= 0xFFFF:
            frame += struct.pack("!BH", 126, l)
        else:
            frame += struct.pack("!BQ", 127, l)
        frame += message
        return frame

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket."""
        if binary:
            opcode = 0x2
        else:
            opcode = 0x1
        message = tornado.escape.utf8(message)
        assert isinstance(message, bytes_type)
        self._write_frame(True, opcode, message)

    def write_ping(self, data):
        """Send ping frame."""
        assert isinstance(data, bytes_type)
        self._write_frame(True, 0x9, data)

    def _receive_frame(self):
        self.stream.read_bytes(2, self._on_frame_start)

    def _on_frame_start(self, data):
        header, payloadlen = struct.unpack("BB", data)
        self._final_frame = header & 0x80
        reserved_bits = header & 0x70
        self._frame_opcode = header & 0xf
        self._frame_opcode_is_control = self._frame_opcode & 0x8
        if reserved_bits:
            # client is using as-yet-undefined extensions; abort
            self._abort()
            return
        self._masked_frame = bool(payloadlen & 0x80)
        payloadlen = payloadlen & 0x7f
        if self._frame_opcode_is_control and payloadlen >= 126:
            # control frames must have payload < 126
            self._abort()
            return
        if payloadlen < 126:
            self._frame_length = payloadlen
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self.stream.read_bytes(self._frame_length, self._on_frame_data)
        elif payloadlen == 126:
            self.stream.read_bytes(2, self._on_frame_length_16)
        elif payloadlen == 127:
            self.stream.read_bytes(8, self._on_frame_length_64)

    def _on_frame_length_16(self, data):
        self._frame_length = struct.unpack("!H", data)[0]
        if self._masked_frame:
            self.stream.read_bytes(4, self._on_masking_key)
        else:
            self.stream.read_bytes(self._frame_length, self._on_frame_data)

    def _on_frame_length_64(self, data):
        self._frame_length = struct.unpack("!Q", data)[0]
        if self._masked_frame:
            self.stream.read_bytes(4, self._on_masking_key)
        else:
            self.stream.read_bytes(self._frame_length, self._on_frame_data)

    def _on_masking_key(self, data):
        self._frame_mask = data
        self.stream.read_bytes(self._frame_length, self._on_masked_frame_data)

    def _apply_mask(self, mask, data):
        # XOR every payload byte with the 4-byte mask, cycling the mask.
        mask = array.array("B", mask)
        unmasked = array.array("B", data)
        for i in xrange(len(data)):
            unmasked[i] = unmasked[i] ^ mask[i % 4]
        if hasattr(unmasked, 'tobytes'):
            # tostring was deprecated in py32.  It hasn't been removed,
            # but since we turn on deprecation warnings in our tests
            # we need to use the right one.
            return unmasked.tobytes()
        else:
            return unmasked.tostring()

    def _on_masked_frame_data(self, data):
        self._on_frame_data(self._apply_mask(self._frame_mask, data))

    def _on_frame_data(self, data):
        if self._frame_opcode_is_control:
            # control frames may be interleaved with a series of fragmented
            # data frames, so control frames must not interact with
            # self._fragmented_*
            if not self._final_frame:
                # control frames must not be fragmented
                self._abort()
                return
            opcode = self._frame_opcode
        elif self._frame_opcode == 0:  # continuation frame
            if self._fragmented_message_buffer is None:
                # nothing to continue
                self._abort()
                return
            self._fragmented_message_buffer += data
            if self._final_frame:
                opcode = self._fragmented_message_opcode
                data = self._fragmented_message_buffer
                self._fragmented_message_buffer = None
        else:  # start of new data message
            if self._fragmented_message_buffer is not None:
                # can't start new message until the old one is finished
                self._abort()
                return
            if self._final_frame:
                opcode = self._frame_opcode
            else:
                self._fragmented_message_opcode = self._frame_opcode
                self._fragmented_message_buffer = data

        if self._final_frame:
            self._handle_message(opcode, data)

        if not self.client_terminated:
            self._receive_frame()

    def _handle_message(self, opcode, data):
        if self.client_terminated:
            return

        if opcode == 0x1:
            # UTF-8 data
            try:
                decoded = data.decode("utf-8")
            except UnicodeDecodeError:
                self._abort()
                return
            self.async_callback(self.handler.on_message)(decoded)
        elif opcode == 0x2:
            # Binary data
            self.async_callback(self.handler.on_message)(data)
        elif opcode == 0x8:
            # Close
            self.client_terminated = True
            self.close()
        elif opcode == 0x9:
            # Ping
            self._write_frame(True, 0xA, data)
        elif opcode == 0xA:
            # Pong
            self.async_callback(self.handler.on_pong)(data)
        else:
            self._abort()

    def close(self):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                self._write_frame(True, 0x8, b"")
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            # Give the client a few seconds to complete a clean shutdown,
            # otherwise just close the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                self.stream.io_loop.time() + 5, self._abort)


class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection."""
    def __init__(self, io_loop, request):
        self.connect_future = Future()
        self.read_future = None
        self.read_queue = collections.deque()
        # Random nonce for the Sec-WebSocket-Key handshake header.
        self.key = base64.b64encode(os.urandom(16))

        # Rewrite ws(s):// to http(s):// so the HTTP client machinery
        # can open the connection; the Upgrade headers do the rest.
        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })

        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, lambda response: None,
            104857600, Resolver(io_loop=io_loop))

    def _on_close(self):
        # A ``None`` message signals end-of-stream to read_message().
        self.on_message(None)

    def _handle_1xx(self, code):
        # The server accepts the upgrade with "101 Switching Protocols".
        assert code == 101
        assert self.headers['Upgrade'].lower() == 'websocket'
        assert self.headers['Connection'].lower() == 'upgrade'
        accept = WebSocketProtocol13.compute_accept_value(self.key)
        assert self.headers['Sec-Websocket-Accept'] == accept

        self.protocol = WebSocketProtocol13(self, mask_outgoing=True)
        self.protocol._receive_frame()

        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

        self.connect_future.set_result(self)

    def write_message(self, message, binary=False):
        """Sends a message to the WebSocket server."""
        self.protocol.write_message(message, binary)

    def read_message(self, callback=None):
        """Reads a message from the WebSocket server.

        Returns a future whose result is the message, or None
        if the connection is closed.  If a callback argument
        is given it will be called with the future when it is
        ready.
        """
        assert self.read_future is None
        future = Future()
        if self.read_queue:
            future.set_result(self.read_queue.popleft())
        else:
            self.read_future = future
        if callback is not None:
            self.io_loop.add_future(future, callback)
        return future

    def on_message(self, message):
        if self.read_future is not None:
            self.read_future.set_result(message)
            self.read_future = None
        else:
            self.read_queue.append(message)

    def on_pong(self, data):
        pass


def websocket_connect(url, io_loop=None, callback=None):
    """Client-side websocket support.

    Takes a url and returns a Future whose result is a
    `WebSocketClientConnection`.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    request = httpclient.HTTPRequest(url)
    # Apply the client's default options (proxy, timeouts, etc.).
    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = WebSocketClientConnection(io_loop, request)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
#!/usr/bin/env python
# Copyright 2020 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Record the significant parts of shared library content.

When anything here changes, the callers of the library must be relinked.
This includes the SONAME and the dynamic symbol table (ignoring addresses
and sizes).

This is expected to work on Linux and OSX.
"""

import argparse
import logging
import os
import re
import subprocess
import sys

logger = logging.getLogger(__name__)

# Environment to use for processes we parse output from.
# Force the C locale so tool output has a stable, parseable format.
child_env = os.environ.copy()
child_env['LC_ALL'] = "C"


def parse_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Generate table of contents for a shared library")
    parser.add_argument("-o", "--output", default=None,
                        help=".toc file to create")
    parser.add_argument("--format", action="store", choices=["elf", "macho"],
                        default="elf", help="Library format")
    parser.add_argument("--objdump-tool", default="objdump",
                        help="Tool used to generate TOCs for Elf libraries. "
                        "This is expected to be objdump on Linux platforms")
    parser.add_argument("--otool-tool", default="otool",
                        help="Tool used to read library section headers of "
                        "Mach-O libraries. "
                        "This is expected to be otool on OSX")
    parser.add_argument("--nm-tool", default="nm",
                        help="Tool used to read the dynamic symbol table of "
                        "Mach-O libraries. "
                        "This is expected to be nm on OSX")
    parser.add_argument("input", help="Shared library")
    args = parser.parse_args()
    return args


def _run_tool(cmd):
    """Run `cmd`, returning its stdout split into a list of lines.

    Shared helper for the objdump/otool/nm invocations below.  On
    failure the command is logged and the process exits with the tool's
    return code.
    """
    try:
        result = subprocess.check_output(cmd, env=child_env)
    except subprocess.CalledProcessError as e:
        logger.error("Command failed: %s", str(e.cmd))
        sys.exit(e.returncode)
    return result.decode(sys.getdefaultencoding()).split("\n")


def line_filter(regexp, lines):
    """Filter each line of input by a regular expression.

    If the regular expression is not matched, then the line is not
    output.
    """
    return [line for line in lines if regexp.match(line)]


def line_filter_and_transform(filter_re, transform_re, repl, lines):
    """Transform and filter lines.

    Filter out lines that match filter_re.  Transform remaining lines
    by transform_re and repl as a regular expression replace.  If the
    regular expression is not matched, then the line is output as is.
    """
    return [transform_re.sub(repl, line)
            for line in lines
            if not filter_re.match(line)]


def elf_toc(lib, tool):
    """Generate a table of contents for ELF files.

    This function uses objdump from GNU binutils.
    """
    toc = []

    # Get private (format specific) headers, which includes SONAME.
    result_arr = _run_tool([tool, "-p", lib])

    # `objdump -p` outputs a header per line, and some version
    # information.  Just pick up the lines containing the symbols we're
    # interested in.
    regexp = re.compile(r'\s+SONAME\s')
    toc.extend(line_filter(regexp, result_arr))

    # Get dynamic symbol table from objdump.
    result_arr = _run_tool([tool, "-T", lib])

    # `objdump -T` outputs something like:
    #
    # install/lib/library.so:     file format elf64-x86-64
    #
    # DYNAMIC SYMBOL TABLE:
    # 0000000000000000  w   D  *UND*  0000000000000000 OPT_VER __gmon_start__
    # 0000000000000480 g    DF .init  0000000000000000 _init
    # 00000000000005dc g    DF .fini  0000000000000000 _fini
    # 0000000000001868 g    D  *ABS*  0000000000000000 _edata
    # 0000000000001868 g    D  *ABS*  0000000000000000 __bss_start
    # 0000000000001869 g    D  *ABS*  0000000000000000 _end
    #
    # The first column is address.
    # The next 'column' are 7 flags (which may not be present).
    # The third column is the section name.
    # The fourth is size.
    # After this is an optional version, and then the symbol.
    #
    # We want to drop address and size.
    #
    # See https://sourceware.org/binutils/docs/binutils/objdump.html
    flags_re = r'[lgu! ][w ][C ][W ][Ii ][dD ][FfO ]'
    lax_flags_re = r'.{7}'
    section_re = r'\S+'
    hexdigits_re = r'[\da-f]+'

    # Filter out undefined symbols, indicated with *UND* as the section
    # name.
    filter_undefined_re = re.compile(
        hexdigits_re + r'\s' + lax_flags_re + r'\s\*UND\*')
    transform_re = re.compile(
        r'^' + hexdigits_re + r'\s(' + flags_re + r'\s' + section_re +
        r')\s+' + hexdigits_re + r'(\s+.+)$')
    repl = r'\1\2'
    toc.extend(line_filter_and_transform(
        filter_undefined_re, transform_re, repl, result_arr))

    return toc


def macho_toc(lib, otool, nm):
    """Generate a table of contents for Mach-O format libraries.

    This relies on otool and nm.  Don't currently support cross
    compiles.
    """
    toc = []

    # In Mach-O, the equivalent of SONAME is LC_ID_DYLIB.  We can
    # retrieve this with `otool -D`.
    toc.extend(_run_tool([otool, "-D", lib]))

    # Get global symbols, portable format.
    result_arr = _run_tool([nm, "-gP", lib])

    # The output of `nm -gP` is 4 columns: symbol, type, address?, size?
    # Only keep the first 2 columns, and drop undefined symbols
    # (type 'U').
    filter_re = re.compile(r'\S+\sU\s')
    transform_re = re.compile(r'^(\S+\s[UATDBC\-SI])\s.*')
    repl = r'\1'
    toc.extend(line_filter_and_transform(
        filter_re, transform_re, repl, result_arr))

    return toc


def write_if_changed(filename, data):
    """Write data to file replacing current content, but only if the
    content has changed, or the file doesn't exist.
    """
    same_content = False
    try:
        if os.path.isfile(filename):
            with open(filename, "rt") as fp:
                same_content = data == fp.read()
    except OSError:
        # An unreadable existing file is treated as changed.  The
        # previous try/finally version wrote the file and then
        # propagated the read error, failing the build needlessly.
        same_content = False
    if not same_content:
        logger.debug("Updating %s", filename)
        with open(filename, "wt") as fp:
            fp.write(data)


def main():
    logging.basicConfig(format='%(levelname)s: %(message)s',
                        level=logging.WARNING)
    args = parse_args()

    if args.format == "elf":
        lines = elf_toc(args.input, args.objdump_tool)
    elif args.format == "macho":
        lines = macho_toc(args.input, args.otool_tool, args.nm_tool)
    else:
        # argparse `choices` makes this unreachable in practice; fail
        # loudly rather than hitting UnboundLocalError on `lines`.
        raise ValueError("Unknown library format: {}".format(args.format))

    toc = "\n".join(lines)
    toc += "\n"  # Include a newline at the end of the file

    if args.output:
        write_if_changed(args.output, toc)
    else:
        sys.stdout.write(toc)


if __name__ == "__main__":
    main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for data input for speech commands.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow as tf from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio from tensorflow.examples.speech_commands import input_data from tensorflow.examples.speech_commands import models from tensorflow.python.framework import test_util from tensorflow.python.platform import test class InputDataTest(test.TestCase): def _getWavData(self): with self.cached_session() as sess: sample_data = tf.zeros([32000, 2]) wav_encoder = contrib_audio.encode_wav(sample_data, 16000) wav_data = self.evaluate(wav_encoder) return wav_data def _saveTestWavFile(self, filename, wav_data): with open(filename, "wb") as f: f.write(wav_data) def _saveWavFolders(self, root_dir, labels, how_many): wav_data = self._getWavData() for label in labels: dir_name = os.path.join(root_dir, label) os.mkdir(dir_name) for i in range(how_many): file_path = os.path.join(dir_name, "some_audio_%d.wav" % i) self._saveTestWavFile(file_path, wav_data) def _model_settings(self): return { "desired_samples": 160, "fingerprint_size": 40, "label_count": 4, "window_size_samples": 100, "window_stride_samples": 100, "fingerprint_width": 
40, "preprocess": "mfcc", } def _runGetDataTest(self, preprocess, window_length_ms): tmp_dir = self.get_temp_dir() wav_dir = os.path.join(tmp_dir, "wavs") os.mkdir(wav_dir) self._saveWavFolders(wav_dir, ["a", "b", "c"], 100) background_dir = os.path.join(wav_dir, "_background_noise_") os.mkdir(background_dir) wav_data = self._getWavData() for i in range(10): file_path = os.path.join(background_dir, "background_audio_%d.wav" % i) self._saveTestWavFile(file_path, wav_data) model_settings = models.prepare_model_settings( 4, 16000, 1000, window_length_ms, 20, 40, preprocess) with self.cached_session() as sess: audio_processor = input_data.AudioProcessor( "", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir) result_data, result_labels = audio_processor.get_data( 10, 0, model_settings, 0.3, 0.1, 100, "training", sess) self.assertEqual(10, len(result_data)) self.assertEqual(10, len(result_labels)) def testPrepareWordsList(self): words_list = ["a", "b"] self.assertGreater( len(input_data.prepare_words_list(words_list)), len(words_list)) def testWhichSet(self): self.assertEqual( input_data.which_set("foo.wav", 10, 10), input_data.which_set("foo.wav", 10, 10)) self.assertEqual( input_data.which_set("foo_nohash_0.wav", 10, 10), input_data.which_set("foo_nohash_1.wav", 10, 10)) @test_util.run_deprecated_v1 def testPrepareDataIndex(self): tmp_dir = self.get_temp_dir() self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100) audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10, self._model_settings(), tmp_dir) self.assertLess(0, audio_processor.set_size("training")) self.assertTrue("training" in audio_processor.data_index) self.assertTrue("validation" in audio_processor.data_index) self.assertTrue("testing" in audio_processor.data_index) self.assertEquals(input_data.UNKNOWN_WORD_INDEX, audio_processor.word_to_index["c"]) def testPrepareDataIndexEmpty(self): tmp_dir = self.get_temp_dir() self._saveWavFolders(tmp_dir, ["a", "b", "c"], 0) with 
self.assertRaises(Exception) as e: _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10, self._model_settings(), tmp_dir) self.assertTrue("No .wavs found" in str(e.exception)) def testPrepareDataIndexMissing(self): tmp_dir = self.get_temp_dir() self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100) with self.assertRaises(Exception) as e: _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10, 10, self._model_settings(), tmp_dir) self.assertTrue("Expected to find" in str(e.exception)) @test_util.run_deprecated_v1 def testPrepareBackgroundData(self): tmp_dir = self.get_temp_dir() background_dir = os.path.join(tmp_dir, "_background_noise_") os.mkdir(background_dir) wav_data = self._getWavData() for i in range(10): file_path = os.path.join(background_dir, "background_audio_%d.wav" % i) self._saveTestWavFile(file_path, wav_data) self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100) audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10, self._model_settings(), tmp_dir) self.assertEqual(10, len(audio_processor.background_data)) def testLoadWavFile(self): tmp_dir = self.get_temp_dir() file_path = os.path.join(tmp_dir, "load_test.wav") wav_data = self._getWavData() self._saveTestWavFile(file_path, wav_data) sample_data = input_data.load_wav_file(file_path) self.assertIsNotNone(sample_data) def testSaveWavFile(self): tmp_dir = self.get_temp_dir() file_path = os.path.join(tmp_dir, "load_test.wav") save_data = np.zeros([16000, 1]) input_data.save_wav_file(file_path, save_data, 16000) loaded_data = input_data.load_wav_file(file_path) self.assertIsNotNone(loaded_data) self.assertEqual(16000, len(loaded_data)) @test_util.run_deprecated_v1 def testPrepareProcessingGraph(self): tmp_dir = self.get_temp_dir() wav_dir = os.path.join(tmp_dir, "wavs") os.mkdir(wav_dir) self._saveWavFolders(wav_dir, ["a", "b", "c"], 100) background_dir = os.path.join(wav_dir, "_background_noise_") os.mkdir(background_dir) wav_data = 
self._getWavData() for i in range(10): file_path = os.path.join(background_dir, "background_audio_%d.wav" % i) self._saveTestWavFile(file_path, wav_data) model_settings = { "desired_samples": 160, "fingerprint_size": 40, "label_count": 4, "window_size_samples": 100, "window_stride_samples": 100, "fingerprint_width": 40, "preprocess": "mfcc", } audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir) self.assertIsNotNone(audio_processor.wav_filename_placeholder_) self.assertIsNotNone(audio_processor.foreground_volume_placeholder_) self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_) self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_) self.assertIsNotNone(audio_processor.background_data_placeholder_) self.assertIsNotNone(audio_processor.background_volume_placeholder_) self.assertIsNotNone(audio_processor.output_) @test_util.run_deprecated_v1 def testGetDataAverage(self): self._runGetDataTest("average", 10) @test_util.run_deprecated_v1 def testGetDataAverageLongWindow(self): self._runGetDataTest("average", 30) @test_util.run_deprecated_v1 def testGetDataMfcc(self): self._runGetDataTest("mfcc", 30) @test_util.run_deprecated_v1 def testGetUnprocessedData(self): tmp_dir = self.get_temp_dir() wav_dir = os.path.join(tmp_dir, "wavs") os.mkdir(wav_dir) self._saveWavFolders(wav_dir, ["a", "b", "c"], 100) model_settings = { "desired_samples": 160, "fingerprint_size": 40, "label_count": 4, "window_size_samples": 100, "window_stride_samples": 100, "fingerprint_width": 40, "preprocess": "mfcc", } audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir) result_data, result_labels = audio_processor.get_unprocessed_data( 10, model_settings, "training") self.assertEqual(10, len(result_data)) self.assertEqual(10, len(result_labels)) @test_util.run_deprecated_v1 def testGetFeaturesForWav(self): tmp_dir = self.get_temp_dir() wav_dir = 
os.path.join(tmp_dir, "wavs") os.mkdir(wav_dir) self._saveWavFolders(wav_dir, ["a", "b", "c"], 1) desired_samples = 1600 model_settings = { "desired_samples": desired_samples, "fingerprint_size": 40, "label_count": 4, "window_size_samples": 100, "window_stride_samples": 100, "fingerprint_width": 40, "average_window_width": 6, "preprocess": "average", } with self.cached_session() as sess: audio_processor = input_data.AudioProcessor( "", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir) sample_data = np.zeros([desired_samples, 1]) for i in range(desired_samples): phase = i % 4 if phase == 0: sample_data[i, 0] = 0 elif phase == 1: sample_data[i, 0] = -1 elif phase == 2: sample_data[i, 0] = 0 elif phase == 3: sample_data[i, 0] = 1 test_wav_path = os.path.join(tmp_dir, "test_wav.wav") input_data.save_wav_file(test_wav_path, sample_data, 16000) results = audio_processor.get_features_for_wav(test_wav_path, model_settings, sess) spectrogram = results[0] self.assertEqual(1, spectrogram.shape[0]) self.assertEqual(16, spectrogram.shape[1]) self.assertEqual(11, spectrogram.shape[2]) self.assertNear(0, spectrogram[0, 0, 0], 0.1) self.assertNear(200, spectrogram[0, 0, 5], 0.1) def testGetFeaturesRange(self): model_settings = { "preprocess": "average", } features_min, _ = input_data.get_features_range(model_settings) self.assertNear(0.0, features_min, 1e-5) def testGetMfccFeaturesRange(self): model_settings = { "preprocess": "mfcc", } features_min, features_max = input_data.get_features_range(model_settings) self.assertLess(features_min, features_max) if __name__ == "__main__": test.main()
# -*- coding: utf-8 -*-
"""This file contains a MRUList Registry plugin."""

import abc
import logging

import construct

from plaso.events import windows_events
from plaso.lib import binary
from plaso.parsers import winreg
from plaso.parsers.shared import shell_items
from plaso.parsers.winreg_plugins import interface


# A mixin class is used here to not to have the duplicate functionality
# to parse the MRUList Registry values. However multiple inheritance
# and thus mixins are to be used sparsely in this codebase, hence we need
# to find a better solution in not needing to distinguish between key and
# value plugins.
# TODO: refactor Registry key and value plugin to rid ourselves of the mixin.
class MRUListPluginMixin(object):
  """Class for common MRUList Windows Registry plugin functionality."""

  # An MRUList value is a sequence of 16-bit entry "letters"; allow
  # between 1 and 500 entries.
  _MRULIST_STRUCT = construct.Range(
      1, 500, construct.ULInt16(u'entry_letter'))

  @abc.abstractmethod
  def _ParseMRUListEntryValue(
      self, parser_mediator, key, entry_index, entry_letter, file_entry=None,
      parser_chain=None, **kwargs):
    """Parses the MRUList entry value.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: the Registry key (instance of winreg.WinRegKey) that contains
           the MRUList value.
      entry_index: integer value representing the MRUList entry index.
      entry_letter: character value representing the entry.
      file_entry: Optional file entry object (instance of dfvfs.FileEntry).
                  The default is None.
      parser_chain: Optional string containing the parsing chain up to this
                    point. The default is None.

    Returns:
      A string containing the value.
    """

  def _ParseMRUListValue(self, key):
    """Parses the MRUList value in a given Registry key.

    Args:
      key: the Registry key (instance of winreg.WinRegKey) that contains
           the MRUList value.

    Returns:
      A MRUList value generator, which returns the MRU index number
      and entry value.
    """
    mru_list_value = key.GetValue(u'MRUList')

    # The key exists but does not contain a value named "MRUList".
    if not mru_list_value:
      return enumerate([])

    try:
      mru_list = self._MRULIST_STRUCT.parse(mru_list_value.raw_data)
    except construct.FieldError:
      # Malformed MRUList data: log and behave as if it were empty.
      logging.warning(u'[{0:s}] Unable to parse the MRU key: {1:s}'.format(
          self.NAME, key.path))
      return enumerate([])

    return enumerate(mru_list)

  def _ParseMRUListKey(
      self, parser_mediator, key, registry_type=None, file_entry=None,
      parser_chain=None, codepage=u'cp1252'):
    """Extract event objects from a MRUList Registry key.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: the Registry key (instance of winreg.WinRegKey).
      registry_type: Optional Registry type string. The default is None.
      codepage: Optional extended ASCII string codepage. The default
                is cp1252.
      file_entry: Optional file entry object (instance of dfvfs.FileEntry).
                  The default is None.
      parser_chain: Optional string containing the parsing chain up to this
                    point. The default is None.
    """
    text_dict = {}
    for entry_index, entry_letter in self._ParseMRUListValue(key):
      # TODO: detect if list ends prematurely.
      # MRU lists are terminated with \0 (0x0000).
      if entry_letter == 0:
        break

      entry_letter = chr(entry_letter)

      value_string = self._ParseMRUListEntryValue(
          parser_mediator, key, entry_index, entry_letter, codepage=codepage,
          file_entry=file_entry, parser_chain=parser_chain)

      # Keys are 1-based in the produced event values.
      value_text = u'Index: {0:d} [MRU Value {1:s}]'.format(
          entry_index + 1, entry_letter)

      text_dict[value_text] = value_string

    event_object = windows_events.WindowsRegistryEvent(
        key.last_written_timestamp, key.path, text_dict,
        offset=key.offset, registry_type=registry_type,
        source_append=': MRU List')
    parser_mediator.ProduceEvent(event_object)


class MRUListStringPlugin(interface.ValuePlugin, MRUListPluginMixin):
  """Windows Registry plugin to parse a string MRUList."""

  NAME = u'mrulist_string'
  DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'

  REG_TYPE = u'any'
  REG_VALUES = frozenset([u'MRUList', u'a'])
  URLS = [u'http://forensicartifacts.com/tag/mru/']

  def _ParseMRUListEntryValue(
      self, parser_mediator, key, entry_index, entry_letter, **unused_kwargs):
    """Parses the MRUList entry value.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: the Registry key (instance of winreg.WinRegKey) that contains
           the MRUList value.
      entry_index: integer value representing the MRUList entry index.
      entry_letter: character value representing the entry.

    Returns:
      A string containing the value.
    """
    value_string = u''

    value = key.GetValue(u'{0:s}'.format(entry_letter))
    if value is None:
      logging.debug(
          u'[{0:s}] Missing MRUList entry value: {1:s} in key: {2:s}.'.format(
              self.NAME, entry_letter, key.path))

    elif value.DataIsString():
      value_string = value.data

    elif value.DataIsBinaryData():
      # Fall back to interpreting binary data as a UTF-16 string.
      logging.debug((
          u'[{0:s}] Non-string MRUList entry value: {1:s} parsed as string '
          u'in key: {2:s}.').format(self.NAME, entry_letter, key.path))
      utf16_stream = binary.ByteStreamCopyToUtf16Stream(value.data)

      try:
        value_string = utf16_stream.decode(u'utf-16-le')
      except UnicodeDecodeError as exception:
        # Keep a hex representation when the stream is not valid UTF-16.
        value_string = binary.HexifyBuffer(utf16_stream)
        logging.warning((
            u'[{0:s}] Unable to decode UTF-16 stream: {1:s} in MRUList entry '
            u'value: {2:s} in key: {3:s} with error: {4:s}').format(
                self.NAME, value_string, entry_letter, key.path, exception))

    return value_string

  def GetEntries(
      self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
      **kwargs):
    """Extracts event objects from a MRU list.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: Optional Registry key (instance of winreg.WinRegKey).
           The default is None.
      registry_type: Optional Registry type string. The default is None.
      codepage: Optional extended ASCII string codepage. The default
                is cp1252.
    """
    self._ParseMRUListKey(
        parser_mediator, key, registry_type=registry_type, codepage=codepage)

  def Process(
      self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
      **kwargs):
    """Determine if we can process this Registry key or not.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: Optional Registry key (instance of winreg.WinRegKey).
           The default is None.
      registry_type: Optional Registry type string. The default is None.
      codepage: Optional extended ASCII string codepage. The default
                is cp1252.
    """
    # Prevent this plugin triggering on sub paths of non-string MRUList
    # values; those are handled by MRUListShellItemListPlugin below.
    if u'Explorer\\DesktopStreamMRU' in key.path:
      return

    super(MRUListStringPlugin, self).Process(
        parser_mediator, key=key, registry_type=registry_type,
        codepage=codepage)


class MRUListShellItemListPlugin(interface.KeyPlugin, MRUListPluginMixin):
  """Windows Registry plugin to parse a shell item list MRUList."""

  NAME = u'mrulist_shell_item_list'
  DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'

  REG_TYPE = u'any'
  REG_KEYS = frozenset([
      (u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\'
       u'DesktopStreamMRU')])

  URLS = [u'https://github.com/libyal/winreg-kb/wiki/MRU-keys']

  def _ParseMRUListEntryValue(
      self, parser_mediator, key, entry_index, entry_letter,
      codepage=u'cp1252', file_entry=None, parser_chain=None,
      **unused_kwargs):
    """Parses the MRUList entry value.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: the Registry key (instance of winreg.WinRegKey) that contains
           the MRUList value.
      entry_index: integer value representing the MRUList entry index.
      entry_letter: character value representing the entry.
      codepage: Optional extended ASCII string codepage. The default
                is cp1252.
      file_entry: Optional file entry object (instance of dfvfs.FileEntry).
                  The default is None.
      parser_chain: Optional string containing the parsing chain up to this
                    point. The default is None.

    Returns:
      A string containing the value.
    """
    value_string = u''

    value = key.GetValue(u'{0:s}'.format(entry_letter))
    if value is None:
      logging.debug(
          u'[{0:s}] Missing MRUList entry value: {1:s} in key: {2:s}.'.format(
              self.NAME, entry_letter, key.path))

    elif not value.DataIsBinaryData():
      logging.debug((
          u'[{0:s}] Non-binary MRUList entry value: {1:s} in key: '
          u'{2:s}.').format(self.NAME, entry_letter, key.path))

    elif value.data:
      # Binary entries are shell item lists; delegate to the shell items
      # parser and record the reconstructed path.
      shell_items_parser = shell_items.ShellItemsParser(key.path)
      shell_items_parser.UpdateChainAndParse(
          parser_mediator, value.data, None, codepage=codepage)

      value_string = u'Shell item path: {0:s}'.format(
          shell_items_parser.CopyToPath())

    return value_string

  def GetEntries(
      self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
      **kwargs):
    """Extract event objects from a Registry key containing a MRUList value.

    Args:
      parser_mediator: A parser context object (instance of ParserContext).
      key: Optional Registry key (instance of winreg.WinRegKey).
           The default is None.
      registry_type: Optional Registry type string. The default is None.
      codepage: Optional extended ASCII string codepage. The default
                is cp1252.
    """
    self._ParseMRUListKey(
        parser_mediator, key, registry_type=registry_type, codepage=codepage)


winreg.WinRegistryParser.RegisterPlugins([
    MRUListStringPlugin, MRUListShellItemListPlugin])
# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.  You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.

"""Functions and classes common to multiple pymongo modules."""

# NOTE(review): this module is Python 2 code -- it relies on the
# `basestring` and `long` builtins below.

import warnings

from pymongo import read_preferences

from pymongo.read_preferences import ReadPreference
from pymongo.errors import ConfigurationError


def raise_config_error(key, dummy):
    """Raise ConfigurationError with the given key name.

    Used as the fallback validator for unrecognized options (see
    VALIDATORS/validate below); `dummy` receives the option value and
    is ignored.
    """
    raise ConfigurationError("Unknown option %s" % (key,))


def validate_boolean(option, value):
    """Validates that 'value' is 'true' or 'false'.

    Accepts a bool, or (from URI parsing) the exact strings 'true' and
    'false'.
    """
    if isinstance(value, bool):
        return value
    elif isinstance(value, basestring):
        if value not in ('true', 'false'):
            raise ConfigurationError("The value of %s must be "
                                     "'true' or 'false'" % (option,))
        return value == 'true'
    raise TypeError("Wrong type for %s, value must be a boolean" % (option,))


def validate_integer(option, value):
    """Validates that 'value' is an integer (or basestring representation).
    """
    if isinstance(value, (int, long)):
        return value
    elif isinstance(value, basestring):
        # isdigit() rejects signs and decimal points, so this only
        # accepts non-negative integer strings.
        if not value.isdigit():
            raise ConfigurationError("The value of %s must be "
                                     "an integer" % (option,))
        return int(value)
    raise TypeError("Wrong type for %s, value must be an integer" % (option,))


def validate_positive_integer(option, value):
    """Validate that 'value' is a positive integer.

    NOTE(review): despite the name, zero is accepted here (only
    negative values are rejected) -- presumably so options such as
    ``w=0`` validate; confirm before tightening the check.
    """
    val = validate_integer(option, value)
    if val < 0:
        raise ConfigurationError("The value of %s must be "
                                 "a positive integer" % (option,))
    return val


def validate_basestring(option, value):
    """Validates that 'value' is an instance of `basestring`.
    """
    if isinstance(value, basestring):
        return value
    raise TypeError("Wrong type for %s, value must be an "
                    "instance of %s" % (option, basestring.__name__))


def validate_int_or_basestring(option, value):
    """Validates that 'value' is an integer or string.

    Digit-only strings are converted to int; any other string is
    returned unchanged (e.g. ``w='majority'``).
    """
    if isinstance(value, (int, long)):
        return value
    elif isinstance(value, basestring):
        if value.isdigit():
            return int(value)
        return value
    raise TypeError("Wrong type for %s, value must be an "
                    "integer or a string" % (option,))


def validate_positive_float(option, value):
    """Validates that 'value' is a float, or can be converted to one,
    and is positive.
    """
    err = ConfigurationError("%s must be a positive int or float" % (option,))
    try:
        value = float(value)
    except (ValueError, TypeError):
        raise err

    # Unlike validate_positive_integer, zero is rejected here.
    if value <= 0:
        raise err
    return value


def validate_timeout_or_none(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds.
    """
    if value is None:
        return value
    return validate_positive_float(option, value) / 1000.0


def validate_read_preference(dummy, value):
    """Validate read preference for a ReplicaSetConnection.
    """
    if value not in read_preferences.modes:
        raise ConfigurationError("Not a valid read preference")
    return value


def validate_tag_sets(dummy, value):
    """Validate tag sets for a ReplicaSetConnection.

    None is normalized to ``[{}]`` ("match any member"); otherwise the
    value must be a non-empty list of dicts.
    """
    if value is None:
        return [{}]

    if not isinstance(value, list):
        raise ConfigurationError((
            "Tag sets %s invalid, must be a list") % repr(value))
    if len(value) == 0:
        raise ConfigurationError((
            "Tag sets %s invalid, must be None or contain at least one set of"
            " tags") % repr(value))

    for tags in value:
        if not isinstance(tags, dict):
            raise ConfigurationError(
                "Tag set %s invalid, must be a dict" % repr(tags))

    return value


# journal is an alias for j,
# wtimeoutms is an alias for wtimeout
VALIDATORS = {
    'replicaset': validate_basestring,
    'slaveok': validate_boolean,
    'slave_okay': validate_boolean,
    'safe': validate_boolean,
    'w': validate_int_or_basestring,
    'wtimeout': validate_integer,
    'wtimeoutms': validate_integer,
    'fsync': validate_boolean,
    'j': validate_boolean,
    'journal': validate_boolean,
    'connecttimeoutms': validate_timeout_or_none,
    'sockettimeoutms': validate_timeout_or_none,
    'ssl': validate_boolean,
    'read_preference': validate_read_preference,
    'tag_sets': validate_tag_sets,
    'secondaryacceptablelatencyms': validate_positive_float,
    'secondary_acceptable_latency_ms': validate_positive_float,
    'auto_start_request': validate_boolean,
    'use_greenlets': validate_boolean,
}


def validate(option, value):
    """Generic validation function.

    Looks the lower-cased option up in VALIDATORS (falling back to
    raise_config_error for unknown options) and returns the normalized
    (option, value) pair.
    """
    lower = option.lower()
    validator = VALIDATORS.get(lower, raise_config_error)
    value = validator(option, value)
    return lower, value


# Options that imply a write concern ("getlasterror") when set.
SAFE_OPTIONS = frozenset([
    'w',
    'wtimeout',
    'wtimeoutms',
    'fsync',
    'j',
    'journal'
])


class BaseObject(object):
    """A base class that provides attributes and methods common
    to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO 10GEN """ def __init__(self, **options): self.__slave_okay = False self.__read_pref = ReadPreference.PRIMARY self.__tag_sets = [{}] self.__secondary_acceptable_latency_ms = 15 self.__safe = None self.__safe_opts = {} self.__set_options(options) if (self.__read_pref == ReadPreference.PRIMARY and self.__tag_sets != [{}] ): raise ConfigurationError( "ReadPreference PRIMARY cannot be combined with tags") # If safe hasn't been implicitly set by write concerns then set it. if self.__safe is None: self.__safe = validate_boolean('safe', options.get("safe", False)) if self.__safe and not options.get("safe", True): warnings.warn("Conflicting write concerns. Safe set as False " "but write concerns have been set making safe True. " "Please set safe to True.", UserWarning) def __set_safe_option(self, option, value, check=False): """Validates and sets getlasterror options for this object (Connection, Database, Collection, etc.) """ if value is None: self.__safe_opts.pop(option, None) else: if check: option, value = validate(option, value) self.__safe_opts[option] = value self.__safe = True def __set_options(self, options): """Validates and sets all options passed to this object.""" for option, value in options.iteritems(): if option in ('slave_okay', 'slaveok'): self.__slave_okay = validate_boolean(option, value) elif option == 'read_preference': self.__read_pref = validate_read_preference(option, value) elif option == 'tag_sets': self.__tag_sets = validate_tag_sets(option, value) elif option in ( 'secondaryAcceptableLatencyMS', 'secondary_acceptable_latency_ms' ): self.__secondary_acceptable_latency_ms = \ validate_positive_float(option, value) elif option in SAFE_OPTIONS: if option == 'journal': self.__set_safe_option('j', value) elif option == 'wtimeoutms': self.__set_safe_option('wtimeout', value) else: self.__set_safe_option(option, value) def __get_slave_okay(self): """DEPRECATED. Use `read_preference` instead. .. 
versionchanged:: 2.1 Deprecated slave_okay. .. versionadded:: 2.0 """ return self.__slave_okay def __set_slave_okay(self, value): """Property setter for slave_okay""" warnings.warn("slave_okay is deprecated. Please use " "read_preference instead.", DeprecationWarning) self.__slave_okay = validate_boolean('slave_okay', value) slave_okay = property(__get_slave_okay, __set_slave_okay) def __get_read_pref(self): """The read preference mode for this instance. See :class:`~pymongo.read_preferences.ReadPreference` for available options. .. versionadded:: 2.1 """ return self.__read_pref def __set_read_pref(self, value): """Property setter for read_preference""" self.__read_pref = validate_read_preference('read_preference', value) read_preference = property(__get_read_pref, __set_read_pref) def __get_acceptable_latency(self): """Any replica-set member whose ping time is within secondary_acceptable_latency_ms of the nearest member may accept reads. Defaults to 15 milliseconds. See :class:`~pymongo.read_preferences.ReadPreference`. .. versionadded:: 2.3 """ return self.__secondary_acceptable_latency_ms def __set_acceptable_latency(self, value): """Property setter for secondary_acceptable_latency_ms""" self.__secondary_acceptable_latency_ms = (validate_positive_float( 'secondary_acceptable_latency_ms', value)) secondary_acceptable_latency_ms = property( __get_acceptable_latency, __set_acceptable_latency) def __get_tag_sets(self): """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to read only from members whose ``dc`` tag has the value ``"ny"``. To specify a priority-order for tag sets, provide a list of tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag set, ``{}``, means "read from any member that matches the mode, ignoring tags." ReplicaSetConnection tries each set of tags in turn until it finds a set of tags with at least one matching member. .. seealso:: `Data-Center Awareness <http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_ .. 
versionadded:: 2.3 """ return self.__tag_sets def __set_tag_sets(self, value): """Property setter for tag_sets""" self.__tag_sets = validate_tag_sets('tag_sets', value) tag_sets = property(__get_tag_sets, __set_tag_sets) def __get_safe(self): """Use getlasterror with every write operation? .. versionadded:: 2.0 """ return self.__safe def __set_safe(self, value): """Property setter for safe""" self.__safe = validate_boolean('safe', value) safe = property(__get_safe, __set_safe) def get_lasterror_options(self): """Returns a dict of the getlasterror options set on this instance. .. versionadded:: 2.0 """ return self.__safe_opts.copy() def set_lasterror_options(self, **kwargs): """Set getlasterror options for this instance. Valid options include j=<bool>, w=<int>, wtimeout=<int>, and fsync=<bool>. Implies safe=True. :Parameters: - `**kwargs`: Options should be passed as keyword arguments (e.g. w=2, fsync=True) .. versionadded:: 2.0 """ for key, value in kwargs.iteritems(): self.__set_safe_option(key, value, check=True) def unset_lasterror_options(self, *options): """Unset getlasterror options for this instance. If no options are passed unsets all getlasterror options. This does not set `safe` to False. :Parameters: - `*options`: The list of options to unset. .. versionadded:: 2.0 """ if len(options): for option in options: self.__safe_opts.pop(option, None) else: self.__safe_opts = {} def _get_safe_and_lasterror_options(self, safe=None, **options): """Get the current safe mode and any getLastError options. Determines if the current write is safe or not based on the passed in or inherited safe value. Passing any write concerns automatically sets safe to True. :Parameters: - `safe`: check that the operation succeeded? - `**options`: overriding getLastError options .. versionadded:: 2.3 """ if safe is None: safe = self.safe safe = validate_boolean('safe', safe) if safe or options: safe = True if not options: options.update(self.get_lasterror_options()) return safe, options
# -*- coding: utf-8 -*-

# Notes:
#
# - this plug-in uses LRCache.jar from export_flow_datasets/lib.
# - this plug-in requires Jython version 2.7 (for json module)

'''
Aggregation plug-in to generate FCS plots.

The first time a scatterplot is generated for an FCS file, the data is read
and stored in a CSV file that is registered in openBIS. Later plots will use
the CSV file directly.

@author: Aaron Ponti
'''

import os.path
import logging
import java.io.File
import java.util.ArrayList
import json
import uuid
from threading import Thread

from ch.ethz.scu.obit.flow.readers import FCSReader
from ch.ethz.scu.obit.flow.readers import Hyperlog
from ch.ethz.scu.obit.common.server.longrunning import LRCache


def setUpLogging():
    """Sets up logging and returns the logger object."""

    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    rpPath = "../core-plugins/flow/2/dss/reporting-plugins/retrieve_fcs_events"

    # Path to the logs subfolder
    logPath = os.path.join(rpPath, "logs")

    # Make sure the logs subfolder exists
    if not os.path.exists(logPath):
        os.makedirs(logPath)

    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")

    # Create the logger
    logging.basicConfig(filename=logFile, level=logging.DEBUG,
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    _logger = logging.getLogger("FlowFCSPlotter")

    return _logger


def getFileForCode(code):
    """
    Get the path to the FCS file that is associated to the given dataSet.
    If no files are found, returns [].
    """

    dataSetFiles = []

    # NOTE(review): 'contentProvider' is not defined in this module --
    # presumably injected into the plug-in scope by openBIS; verify.
    content = contentProvider.getContent(code)
    nodes = content.listMatchingNodes("original", ".*\.fcs")
    if nodes is not None:
        for node in nodes:
            fileName = node.tryGetFile()
            if fileName is not None:
                fileName = str(fileName)
                # Keep only real .fcs files (case-insensitive).
                if fileName.lower().endswith(".fcs"):
                    dataSetFiles.append(fileName)

    # Return the files
    return dataSetFiles


# Plug-in entry point
#
# This plug-in always returns immediately. The first time it is called, it
# starts the retrieve process in a separate thread and returns a unique ID to
# the client that it will later use to retrieve the state of the progress.
#
# This method takes a list of parameters that it also returns in a table
# (tableBuilder) to the client. The names of the input parameters match the
# corresponding column names. The following list describes the input
# parameters:
#
# uid      : unique identifier of the running plug-in. The first time it is
#            either omitted or passed as "" from the client, since it is the
#            server that creates the unique ID for the job. After this is
#            returned to the client in the first call, it must be passed on
#            again as a parameter to the server.
# code     : code of the FCS file to be loaded to retrieve the data to be
#            plotted.
# paramX   : name of the parameter for the X axis
# paramY   : name of the parameter for the Y axis
# displayX : scaling (linear or logarithmic) of the X axis -- CURRENTLY UNUSED
# displayY : scaling (linear or logarithmic) of the Y axis -- CURRENTLY UNUSED
# numEvents: total number of events known to be in the file
# maxNumEvents: max number of events to be returned for plotting.
# nodeKey  : key of the FCS node in the tree. This is not used here, but needs
#            to be passed back at the end of the process since it will be used
#            for caching the data in the node itself to speed up subsequent
#            plots.
#
# The following are NOT input parameters and are only returned in the
# tableBuilder (i.e. all the input parameters above are ALSO returned):
#
# completed: True if the process has completed in the meanwhile, False if it
#            is still running.
# success  : True if the process completed successfully, False otherwise.
# message  : message to be displayed in the client. Please notice that this is
#            not necessarily an error message (i.e. if success is True it will
#            be a success message).
# data     : the data read from the FCS/CSV file to be plotted in the client
def aggregate(parameters, tableBuilder):
    """Plug-in entry point: start the retrieve job or poll its state.

    With no/empty "uid" it starts retrieveProcess() in a background
    thread and returns a fresh uid; otherwise it returns the current
    state of job `uid` from the LRCache.
    """

    # Add the table headers
    tableBuilder.addHeader("uid")
    tableBuilder.addHeader("completed")
    tableBuilder.addHeader("success")
    tableBuilder.addHeader("message")
    tableBuilder.addHeader("data")
    tableBuilder.addHeader("code")
    tableBuilder.addHeader("paramX")
    tableBuilder.addHeader("paramY")
    tableBuilder.addHeader("displayX")
    tableBuilder.addHeader("displayY")
    tableBuilder.addHeader("numEvents")
    tableBuilder.addHeader("maxNumEvents")
    tableBuilder.addHeader("samplingMethod")
    tableBuilder.addHeader("nodeKey")

    # Get the ID of the call if it already exists
    uid = parameters.get("uid");

    if uid is None or uid == "":

        # Create a unique id
        uid = str(uuid.uuid4())

        # Fill in relevant information (placeholder row: job just started)
        row = tableBuilder.addRow()
        row.setCell("uid", uid)
        row.setCell("completed", False)
        row.setCell("success", True)
        row.setCell("message", "")
        row.setCell("data", "")
        row.setCell("code", "")
        row.setCell("paramX", "")
        row.setCell("paramY", "")
        row.setCell("displayX", "")
        row.setCell("displayY", "")
        row.setCell("numEvents", "")
        row.setCell("maxNumEvents", "")
        row.setCell("samplingMethod", "")
        row.setCell("nodeKey", "")

        # Launch the actual process in a separate thread
        thread = Thread(target=retrieveProcess,
                        args=(parameters, tableBuilder, uid))
        thread.start()

        # Return immediately
        return

    # The process is already running in a separate thread. We get current
    # results and return them
    resultToSend = LRCache.get(uid);
    if resultToSend is None:
        # This should not happen
        raise Exception("Could not retrieve results from result cache!")

    # Fill in relevant information
    row = tableBuilder.addRow()
    row.setCell("uid", resultToSend["uid"])
    row.setCell("completed", resultToSend["completed"])
    row.setCell("success", resultToSend["success"])
    row.setCell("message", resultToSend["message"])
    row.setCell("data", resultToSend["data"])
    row.setCell("code", resultToSend["code"])
    row.setCell("paramX", resultToSend["paramX"])
    row.setCell("paramY", resultToSend["paramY"])
    row.setCell("displayX", resultToSend["displayX"])
    row.setCell("displayY", resultToSend["displayY"])
    row.setCell("numEvents", resultToSend["numEvents"])
    row.setCell("maxNumEvents", resultToSend["maxNumEvents"])
    row.setCell("samplingMethod", resultToSend["samplingMethod"])
    row.setCell("nodeKey", resultToSend["nodeKey"])


# Perform the retrieve process in a separate thread
def retrieveProcess(parameters, tableBuilder, uid):
    """Background worker: read the FCS file and store plot data for `uid`.

    Progress and results are published by mutating `resultToStore`, the
    dict registered in LRCache under `uid`.
    """

    # Make sure to initialize and store the results. We need to have them
    # since most likely the client will try to retrieve them again before
    # the process is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["completed"] = False
    resultToStore["success"] = True
    resultToStore["message"] = ""
    resultToStore["data"] = ""

    # Get the parameters

    # Get the entity code
    code = parameters.get("code")
    resultToStore["code"] = code

    # Get the X-axis parameter
    paramX = parameters.get("paramX")
    resultToStore["paramX"] = paramX

    # Get the Y-axis parameter
    paramY = parameters.get("paramY")
    resultToStore["paramY"] = paramY

    # Get the X-axis scaling
    displayX = parameters.get("displayX")
    resultToStore["displayX"] = displayX

    # Get the Y-axis scaling
    displayY = parameters.get("displayY")
    resultToStore["displayY"] = displayY

    # Number of events known to be in the file
    numEvents = int(parameters.get("numEvents"))
    resultToStore["numEvents"] = numEvents

    # Maximum number of events to return
    maxNumEvents = int(parameters.get("maxNumEvents"))
    resultToStore["maxNumEvents"] = maxNumEvents

    # Sampling
    samplingMethod = parameters.get("samplingMethod")
    resultToStore["samplingMethod"] = samplingMethod

    # Node key
    nodeKey = parameters.get("nodeKey")
    resultToStore["nodeKey"] = nodeKey

    # Store them into the cache. Later updates mutate this same dict
    # in place, so polling calls observe the progress.
    LRCache.set(uid, resultToStore)

    # Set up logging
    _logger = setUpLogging()

    # Log parameter info
    _logger.info("Requested events for dataset " + code +
                 " and parameters (" + paramX + ", " + paramY + ")")
    _logger.info("Requested scaling for parameter " + paramX + ": " + displayX)
    _logger.info("Requested scaling for parameter " + paramY + ": " + displayY)
    _logger.info("Requested sampling method: " + samplingMethod)
    _logger.info("Number of events in file: " + str(numEvents) +
                 "; maximum number of events to return: " + str(maxNumEvents))

    # Get the FCS file to process
    dataSetFiles = getFileForCode(code)

    # Prepare the data
    dataJSON = ""

    if len(dataSetFiles) != 1:

        # Build the error message
        message = "Could not retrieve the FCS file to process!"

        # Log the error
        _logger.error(message)

        # Store the results and set the completed flag
        resultToStore["completed"] = True
        resultToStore["success"] = False
        resultToStore["message"] = message

        # Return here
        return

    else:

        # Get the FCS file path
        fcsFile = dataSetFiles[0]

        # Log
        _logger.info("Dataset code " + code + " corresponds to FCS file " + \
                     fcsFile)

        # Open the FCS file
        reader = FCSReader(java.io.File(fcsFile), True);

        # Parse the file with data
        if not reader.parse():

            # Build the error message
            message = "Could not process file " + os.path.basename(fcsFile)

            # Log the error
            _logger.error(message)

            # Store the results and set the completed flag
            resultToStore["completed"] = True
            resultToStore["success"] = False
            resultToStore["message"] = message

            # Return here
            return

        # Preparation steps were successful
        parameterNames = reader.getParameterNames()

        # Find the indices of the requested parameters
        indxX = int(parameterNames.indexOf(paramX))
        indxY = int(parameterNames.indexOf(paramY))

        # Prepare the data arrays
        data = []

        # Actual number of events to be extracted
        actualNumEvents = min(maxNumEvents, numEvents)

        # Data sampling method.
        #
        # Method 1: to get the requested number of events, we will sub-
        #           sample the file by skipping a certain number of rows
        #           ("step") in between the returned ones.
        # Method 2: to get the requested number of events, we just return
        #           the first N rows at the beginning of the file. This is
        #           faster, and as far as the experts say, should still be
        #           reasonably representative of the underlying population.
        if samplingMethod == "1":
            sample = True
        else:
            sample = False

        # Now collect the first maxNumEvents rows
        dataX = reader.getDataPerColumnIndex(indxX, actualNumEvents, sample)
        dataY = reader.getDataPerColumnIndex(indxY, actualNumEvents, sample)

        # Is the Hyperlog scaling requested?
        if displayX == "Hyperlog":
            params = Hyperlog.estimateParamHeuristic(dataX)
            Hx = Hyperlog(params[0], params[1], params[2], params[3])
            dataX = Hx.transform(dataX)
            dataX = Hyperlog.arrayMult(dataX, params[0])

        if displayY == "Hyperlog":
            params = Hyperlog.estimateParamHeuristic(dataY)
            Hy = Hyperlog(params[0], params[1], params[2], params[3])
            dataY = Hy.transform(dataY)
            dataY = Hyperlog.arrayMult(dataY, params[0])

        # Build array to JSONify and return to the client
        for i in range(actualNumEvents):
            data.append([float(dataX[i]), float(dataY[i])])

        # JSON encode the data array
        dataJSON = json.dumps(data)

        # Success message
        message = "Successfully processed file " + fcsFile

        # Log
        _logger.info(message)

        # Success
        success = True

        # Store the results and set the completed flag
        resultToStore["completed"] = True
        resultToStore["success"] = True
        resultToStore["message"] = message
        resultToStore["data"] = dataJSON
# orm/persistence.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.

The functions here are called only by the unit of work functions
in unitofwork.py.

"""

import operator
from itertools import groupby
from sqlalchemy import sql, util, exc as sa_exc
from sqlalchemy.orm import attributes, sync, \
                        exc as orm_exc
from sqlalchemy.orm.util import _state_mapper, state_str


def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.

    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed.  The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.

    """

    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return

    states_to_insert, states_to_update = _organize_states_for_save(
                                                base_mapper,
                                                states,
                                                uowtransaction)

    cached_connections = _cached_connection_dict(base_mapper)

    # _sorted_tables is ordered so that parent tables are processed
    # before dependent ones (joined-table inheritance).
    for table, mapper in base_mapper._sorted_tables.iteritems():
        insert = _collect_insert_commands(base_mapper, uowtransaction,
                                table, states_to_insert)

        update = _collect_update_commands(base_mapper, uowtransaction,
                                table, states_to_update)

        if update:
            _emit_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)

        if insert:
            _emit_insert_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    table, insert)

    _finalize_insert_update_commands(base_mapper, uowtransaction,
                                    states_to_insert, states_to_update)


def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship()
    which specifies post_update.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_update = _organize_states_for_post_update(
                                    base_mapper,
                                    states, uowtransaction)

    for table, mapper in base_mapper._sorted_tables.iteritems():
        update = _collect_post_update_commands(base_mapper, uowtransaction,
                                            table, states_to_update,
                                            post_update_cols)

        if update:
            _emit_post_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)


def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.

    This is called within the context of a UOWTransaction during a
    flush operation.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_delete = _organize_states_for_delete(
                                    base_mapper,
                                    states,
                                    uowtransaction)

    table_to_mapper = base_mapper._sorted_tables

    # Delete dependent-table rows before parent-table rows, hence
    # the reversed table order.
    for table in reversed(table_to_mapper.keys()):
        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                table, states_to_delete)

        mapper = table_to_mapper[table]

        _emit_delete_statements(base_mapper, uowtransaction,
                    cached_connections, mapper, table, delete)

    for state, state_dict, mapper, has_identity, connection \
                        in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)


def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.

    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.

    """
    states_to_insert = []
    states_to_update = []

    for state, dict_, mapper, connection in _connections_for_states(
                                            base_mapper, uowtransaction,
                                            states):

        # A state with an identity key is already persistent -> UPDATE.
        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)

        row_switch = None

        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)

        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if not has_identity and \
                instance_key in uowtransaction.session.identity_map:
            instance = \
                uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            if not uowtransaction.is_deleted(existing):
                raise orm_exc.FlushError(
                    "New instance %s with identity key %s conflicts "
                    "with persistent instance %s" %
                    (state_str(state), instance_key,
                     state_str(existing)))

            base_mapper._log_debug(
                "detected row switch for identity %s. "
                "will update %s, remove %s from "
                "transaction", instance_key,
                state_str(state), state_str(existing))

            # remove the "delete" flag from the existing element
            uowtransaction.remove_state_actions(existing)
            row_switch = existing

        if not has_identity and not row_switch:
            states_to_insert.append(
                (state, dict_, mapper, connection,
                has_identity, instance_key, row_switch)
            )
        else:
            states_to_update.append(
                (state, dict_, mapper, connection,
                has_identity, instance_key, row_switch)
            )

    return states_to_insert, states_to_update


def _organize_states_for_post_update(base_mapper, states,
                                                uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.

    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.

    """
    return list(_connections_for_states(base_mapper, uowtransaction,
                                        states))


def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.

    """
    states_to_delete = []

    for state, dict_, mapper, connection in _connections_for_states(
                                            base_mapper, uowtransaction,
                                            states):

        mapper.dispatch.before_delete(mapper, connection, state)

        states_to_delete.append((state, dict_, mapper,
                bool(state.key), connection))
    return states_to_delete


def _collect_insert_commands(base_mapper, uowtransaction, table,
                                                states_to_insert):
    """Identify sets of values to use in INSERT statements for a
    list of states.

    """
    insert = []
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_insert:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        has_all_pks = True
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                params[col.key] = mapper.version_id_generator(None)
            else:
                # pull straight from the dict for
                # pending objects
                prop = mapper._columntoproperty[col]
                value = state_dict.get(prop.key, None)

                if value is None:
                    if col in pks:
                        # missing pk value; rely on the DB/default to
                        # generate it and fetch it back after execute
                        has_all_pks = False
                    elif col.default is None and \
                            col.server_default is None:
                        params[col.key] = value

                elif isinstance(value, sql.ClauseElement):
                    # SQL expressions must be rendered inline, not
                    # passed as bound parameters
                    value_params[col] = value
                else:
                    params[col.key] = value

        insert.append((state, state_dict, params, mapper,
                        connection, value_params, has_all_pks))
    return insert


def _collect_update_commands(base_mapper, uowtransaction,
                                table, states_to_update):
    """Identify sets of values to use in UPDATE statements for a
    list of states.

    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement.  Includes some tricky scenarios where the primary
    key of an object might have been changed.

    """

    update = []
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_update:
        if table not in mapper._pks_by_table:
            continue

        pks = mapper._pks_by_table[table]

        params = {}
        value_params = {}

        hasdata = hasnull = False
        for col in mapper._cols_by_table[table]:
            if col is mapper.version_id_col:
                # WHERE clause matches on the committed (old) version id;
                # (col._label keys are used for WHERE-clause binds,
                # col.key for SET-clause values)
                params[col._label] = \
                    mapper._get_committed_state_attr_by_column(
                                    row_switch or state,
                                    row_switch and row_switch.dict
                                                or state_dict,
                                    col)

                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                    state, prop.key,
                    attributes.PASSIVE_NO_INITIALIZE
                )
                if history.added:
                    params[col.key] = history.added[0]
                    hasdata = True
                else:
                    params[col.key] = mapper.version_id_generator(
                                        params[col._label])

                    # HACK: check for history, in case the
                    # history is only
                    # in a different table than the one
                    # where the version_id_col is.
                    for prop in mapper._columntoproperty.itervalues():
                        history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                        if history.added:
                            hasdata = True
            else:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                                state, prop.key,
                                attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    if isinstance(history.added[0],
                                    sql.ClauseElement):
                        value_params[col] = history.added[0]
                    else:
                        value = history.added[0]
                        params[col.key] = value

                    if col in pks:
                        if history.deleted and \
                                not row_switch:
                            # if passive_updates and sync detected
                            # this was a pk->pk sync, use the new
                            # value to locate the row, since the
                            # DB would already have set this
                            if ("pk_cascaded", state, col) in \
                                            uowtransaction.attributes:
                                value = history.added[0]
                                params[col._label] = value
                            else:
                                # use the old value to
                                # locate the row
                                value = history.deleted[0]
                                params[col._label] = value
                            hasdata = True
                        else:
                            # row switch logic can reach us here
                            # remove the pk from the update params
                            # so the update doesn't
                            # attempt to include the pk in the
                            # update statement
                            del params[col.key]
                            value = history.added[0]
                            params[col._label] = value

                        if value is None:
                            hasnull = True
                    else:
                        hasdata = True
                elif col in pks:
                    # unchanged pk: still needed to locate the row
                    value = state.manager[prop.key].impl.get(
                                                    state, state_dict)
                    if value is None:
                        hasnull = True
                    params[col._label] = value
        if hasdata:
            if hasnull:
                raise sa_exc.FlushError(
                            "Can't update table "
                            "using NULL for primary "
                            "key value")
            update.append((state, state_dict, params, mapper,
                            connection, value_params))
    return update


def _collect_post_update_commands(base_mapper, uowtransaction, table,
                        states_to_update, post_update_cols):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.

    """
    update = []
    for state, state_dict, mapper, connection in states_to_update:
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False

        for col in mapper._cols_by_table[table]:
            if col in pks:
                params[col._label] = \
                        mapper._get_state_attr_by_column(
                                        state,
                                        state_dict, col)
            elif col in post_update_cols:
                prop = mapper._columntoproperty[col]
                history = attributes.get_state_history(
                            state, prop.key,
                            attributes.PASSIVE_NO_INITIALIZE)
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        if hasdata:
            update.append((state, state_dict, params, mapper,
                            connection))
    return update


def _collect_delete_commands(base_mapper, uowtransaction, table,
                                states_to_delete):
    """Identify values to use in DELETE statements for a list of
    states to be deleted."""

    # connection -> list of bind-parameter dicts
    delete = util.defaultdict(list)

    for state, state_dict, mapper, has_identity, connection \
                                        in states_to_delete:
        if not has_identity or table not in mapper._pks_by_table:
            continue

        params = {}
        delete[connection].append(params)
        for col in mapper._pks_by_table[table]:
            params[col.key] = \
                    value = \
                    mapper._get_state_attr_by_column(
                                    state, state_dict, col)
            if value is None:
                raise sa_exc.FlushError(
                            "Can't delete from table "
                            "using NULL for primary "
                            "key value")

        if mapper.version_id_col is not None and \
                    table.c.contains_column(mapper.version_id_col):
            params[mapper.version_id_col.key] = \
                        mapper._get_committed_state_attr_by_column(
                                state, state_dict,
                                mapper.version_id_col)
    return delete


def _emit_update_statements(base_mapper, uowtransaction,
                        cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""

    needs_version_id = mapper.version_id_col is not None and \
                table.c.contains_column(mapper.version_id_col)

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        if needs_version_id:
            clause.clauses.append(mapper.version_id_col ==\
                    sql.bindparam(mapper.version_id_col._label,
                                    type_=col.type))

        return table.update(clause)

    # statement construction is memoized per (op, table) on the mapper
    statement = base_mapper._memo(('update', table), update_stmt)

    rows = 0
    for state, state_dict, params, mapper, \
                connection, value_params in update:

        if value_params:
            # inline SQL expressions present: can't use the cached
            # compiled statement
            c = connection.execute(
                                statement.values(value_params),
                                params)
        else:
            c = cached_connections[connection].\
                                execute(statement, params)

        _postfetch(
                mapper,
                uowtransaction,
                table,
                state,
                state_dict,
                c.context.prefetch_cols,
                c.context.postfetch_cols,
                c.context.compiled_parameters[0],
                value_params)
        rows += c.rowcount

    if connection.dialect.supports_sane_rowcount:
        if rows != len(update):
            # stale data: some rows were concurrently modified/deleted
            raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched." %
                    (table.description, len(update), rows))

    elif needs_version_id:
        util.warn("Dialect %s does not support updated rowcount "
                "- versioning cannot be verified." %
                c.dialect.dialect_description,
                stacklevel=12)


def _emit_insert_statements(base_mapper, uowtransaction,
                        cached_connections, table, insert):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands()."""

    statement = base_mapper._memo(('insert', table), table.insert)

    # group records that can share an executemany() call
    for (connection, pkeys, hasvalue, has_all_pks), \
        records in groupby(insert,
                            lambda rec: (rec[4],
                                    rec[2].keys(),
                                    bool(rec[5]),
                                    rec[6])
    ):
        if has_all_pks and not hasvalue:
            # all pks supplied, no inline SQL expressions:
            # executemany() the whole group at once
            records = list(records)
            multiparams = [rec[2] for rec in records]
            c = cached_connections[connection].\
                                execute(statement, multiparams)

            for (state, state_dict, params, mapper,
                    conn, value_params, has_all_pks), \
                    last_inserted_params in \
                    zip(records, c.context.compiled_parameters):
                _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        c.context.prefetch_cols,
                        c.context.postfetch_cols,
                        last_inserted_params,
                        value_params)

        else:
            # execute one at a time so generated primary keys can be
            # fetched back per row
            for state, state_dict, params, mapper, \
                        connection, value_params, \
                        has_all_pks in records:

                if value_params:
                    result = connection.execute(
                                statement.values(value_params),
                                params)
                else:
                    result = cached_connections[connection].\
                                        execute(statement, params)

                primary_key = result.context.inserted_primary_key

                if primary_key is not None:
                    # set primary key attributes
                    for pk, col in zip(primary_key,
                                    mapper._pks_by_table[table]):
                        prop = mapper._columntoproperty[col]
                        if state_dict.get(prop.key) is None:
                            # TODO: would rather say:
                            #state_dict[prop.key] = pk
                            mapper._set_state_attr_by_column(
                                        state,
                                        state_dict,
                                        col, pk)

                _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        result.context.prefetch_cols,
                        result.context.postfetch_cols,
                        result.context.compiled_parameters[0],
                        value_params)


def _emit_post_update_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands()."""

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(col == sql.bindparam(col._label,
                                            type_=col.type))

        return table.update(clause)

    statement = base_mapper._memo(('post_update', table), update_stmt)

    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, grouper in groupby(
        update, lambda rec: (rec[4], rec[2].keys())
    ):
        connection = key[0]
        multiparams = [params for state, state_dict,
                                params, mapper, conn in grouper]
        cached_connections[connection].\
                            execute(statement, multiparams)


def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                                    mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands()."""

    need_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                    col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col ==
                sql.bindparam(
                        mapper.version_id_col.key,
                        type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)

    for connection, del_objects in delete.iteritems():
        statement = base_mapper._memo(('delete', table), delete_stmt)

        connection = cached_connections[connection]

        if need_version_id:
            # TODO: need test coverage for this [ticket:1761]
            if connection.dialect.supports_sane_rowcount:
                rows = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows += c.rowcount
                if rows != len(del_objects):
                    raise orm_exc.StaleDataError(
                        "DELETE statement on table '%s' expected to "
                        "delete %d row(s); %d were matched." %
                        (table.description, len(del_objects), c.rowcount)
                    )
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            connection.execute(statement, del_objects)


def _finalize_insert_update_commands(base_mapper, uowtransaction,
                            states_to_insert, states_to_update):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.

    """
    for state, state_dict, mapper, connection, has_identity, \
                    instance_key, row_switch in states_to_insert + \
                                                    states_to_update:

        if mapper._readonly_props:
            readonly = state.unmodified_intersection(
                [p.key for p in mapper._readonly_props
                    if p.expire_on_flush or p.key not in state.dict]
            )
            if readonly:
                state.expire_attributes(state.dict, readonly)

        # if eager_defaults option is enabled,
        # refresh whatever has been expired.
        if base_mapper.eager_defaults and state.unloaded:
            state.key = base_mapper._identity_key_from_state(state)
            uowtransaction.session.query(base_mapper)._load_on_ident(
                state.key, refresh_state=state,
                only_load_props=state.unloaded)

        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)


def _postfetch(mapper, uowtransaction, table,
                state, dict_, prefetch_cols, postfetch_cols,
                            params, value_params):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state."""

    if mapper.version_id_col is not None:
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    # prefetched values are known client-side: set them directly
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            mapper._set_state_attr_by_column(state, dict_, c, params[c.key])

    # postfetched values were generated server-side: expire so the
    # next access reloads them
    if postfetch_cols:
        state.expire_attributes(state.dict,
                            [mapper._columntoproperty[c].key
                            for c in postfetch_cols if c in
                            mapper._columntoproperty]
                        )

    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often.  would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(state, m, state, m,
                                        equated_pairs,
                                        uowtransaction,
                                        mapper.passive_updates)


def _connections_for_states(base_mapper, uowtransaction, states):
    """Return an iterator of (state, state.dict, mapper, connection).

    The states are sorted according to _sort_states, then paired
    with the connection they should be using for the given
    unit of work transaction.

    """
    # if session has a connection callable,
    # organize individual states with the connection
    # to use for update
    if uowtransaction.session.connection_callable:
        connection_callable = \
                uowtransaction.session.connection_callable
    else:
        connection = None
        connection_callable = None

    for state in _sort_states(states):
        if connection_callable:
            connection = connection_callable(base_mapper, state.obj())
        elif not connection:
            # lazily obtain the default connection once, then reuse it
            connection = uowtransaction.transaction.connection(
                                    base_mapper)

        mapper = _state_mapper(state)

        yield state, state.dict, mapper, connection


def _cached_connection_dict(base_mapper):
    # dictionary of connection->connection_with_cache_options.
    return util.PopulateDict(
        lambda conn:conn.execution_options(
        compiled_cache=base_mapper._compiled_cache
    ))


def _sort_states(states):
    # pending states (no identity key) first, in insert order;
    # then persistent states ordered by primary key.
    pending = set(states)
    persistent = set(s for s in pending if s.key is not None)
    pending.difference_update(persistent)
    return sorted(pending, key=operator.attrgetter("insert_order")) + \
        sorted(persistent, key=lambda q:q.key[1])
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class SegmentReductionHelper(test.TestCase):
  """Shared fixtures: deterministic inputs and a NumPy reference reducer."""

  def _input(self, input_shape, dtype=dtypes_lib.int32):
    """Returns (tf_constant, np_array) filled with 1..N in `input_shape`."""
    num_elem = 1
    for x in input_shape:
      num_elem *= x
    values = np.arange(1, num_elem + 1)
    np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
    # Add a non-zero imaginary component to complex types.
    if dtype.is_complex:
      np_values -= 1j * np_values
    return constant_op.constant(
        np_values, shape=input_shape, dtype=dtype), np_values

  def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,
                     initial_value=0):
    """NumPy reference: fold `x` rows into segments with op1, finish with op2.

    Segments never touched by `indices` are filled with `initial_value`.
    """
    if not x.size:
      return np.array([])
    indices = np.asarray(indices)
    if num_segments is None:
      num_segments = indices[-1] + 1
    output = [None] * num_segments
    slice_shape = x.shape[indices.ndim:]
    x_flat = x.reshape((indices.size,) + slice_shape)
    for i, index in enumerate(indices.ravel()):
      if (output[index] is not None) and op1 == np.max:
        # np.max is applied element-wise over pairs rather than as a fold.
        for j in range(0, output[index].shape[0]):
          output[index][j] = op1([output[index][j], x_flat[i][j]])
      elif output[index] is not None:
        output[index] = op1(output[index], x_flat[i])
      else:
        output[index] = x_flat[i]
    # zero initialize values that are still uncalculated.
    initial_value_slice = np.ones(slice_shape) * initial_value
    output = [o if o is not None else initial_value_slice for o in output]
    if op2 is not None:
      output = [op2(o) for o in output]
    output = [o.reshape(slice_shape) for o in output]
    return np.array(output)

  def _mean_cum_op(self, x, y):
    # Accumulator for mean: carries (running_sum, count) as a tuple.
    return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)

  def _mean_reduce_op(self, x):
    # Finisher for mean: sum / count.
    return x[0] / x[1] if isinstance(x, tuple) else x

  def _sqrt_n_reduce_op(self, x):
    # Finisher for sqrt_n: sum / sqrt(count).
    return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x


class SegmentReductionOpTest(SegmentReductionHelper):
  """Tests for the sorted segment_* ops (sum/mean/prod/min/max)."""

  def testValues(self):
    dtypes = [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
        dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
    ]

    # Each item is np_op1, np_op2, tf_op
    ops_list = [(np.add, None, math_ops.segment_sum),
                (self._mean_cum_op, self._mean_reduce_op,
                 math_ops.segment_mean),
                (np.ndarray.__mul__, None, math_ops.segment_prod),
                (np.minimum, None, math_ops.segment_min),
                (np.maximum, None, math_ops.segment_max)]

    # A subset of ops has been enabled for complex numbers
    complex_ops_list = [(np.add, None, math_ops.segment_sum),
                        (np.ndarray.__mul__, None,
                         math_ops.segment_prod),
                        (self._mean_cum_op, self._mean_reduce_op,
                         math_ops.segment_mean)]
    n = 10
    shape = [n, 2]
    # Sorted ids: three consecutive rows per segment.
    indices = [i // 3 for i in range(n)]
    for dtype in dtypes:
      if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
        curr_ops_list = complex_ops_list
      else:
        curr_ops_list = ops_list
      for use_gpu in [True, False]:
        with self.cached_session(use_gpu=use_gpu):
          tf_x, np_x = self._input(shape, dtype=dtype)
          for np_op1, np_op2, tf_op in curr_ops_list:
            np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
            s = tf_op(data=tf_x, segment_ids=indices)
            tf_ans = self.evaluate(s)
            self.assertAllClose(np_ans, tf_ans)
            # NOTE(mrry): The static shape inference that computes
            # `tf_ans.shape` can only infer that sizes from dimension 1
            # onwards, because the size of dimension 0 is data-dependent
            # and may therefore vary dynamically.
            self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])

  @test_util.run_deprecated_v1
  def testSegmentIdsShape(self):
    # segment_ids must be rank 1; a [2, 2] id tensor is rejected at graph time.
    shape = [4, 4]
    tf_x, _ = self._input(shape)
    indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
    with self.assertRaises(ValueError):
      math_ops.segment_sum(data=tf_x, segment_ids=indices)

  @test_util.run_deprecated_v1
  def testSegmentIdsSize(self):
    # len(segment_ids) must equal data's first dimension.
    shape = [4, 4]
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        tf_x, _ = self._input(shape)
        indices = [0, 1]
        s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
        with self.assertRaisesOpError("segment_ids should be the same size"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentIdsValid(self):
    # This is a baseline for the following SegmentIdsInvalid* tests.
    shape = [4, 4]
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
        indices = [0, 0, 0, 1]
        result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
        self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)

  def testSegmentIdsGreaterThanZero(self):
    # Ids need not start at 0; segment 0 is then empty in the output.
    shape = [4, 4]
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
        indices = [1, 1, 2, 2]
        np_ans = self._segmentReduce(indices, np_x, np.add)
        s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
        tf_ans = self.evaluate(s)
        self.assertAllClose(np_ans, tf_ans)

  def testSegmentIdsHole(self):
    # Gaps in the id sequence produce zero-filled segments.
    shape = [4, 4]
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
        indices = [0, 0, 3, 3]
        np_ans = self._segmentReduce(indices, np_x, np.add)
        s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
        tf_ans = self.evaluate(s)
        self.assertAllClose(np_ans, tf_ans)

  @test_util.run_deprecated_v1
  def testSegmentIdsInvalid1(self):
    # Negative leading ids.
    shape = [4, 4]
    with self.cached_session():
      tf_x, _ = self._input(shape)
      indices = [-1, -1, 0, 0]
      s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError(
          r"Segment id -1 out of range \[0, 1\), possibly because "
          "'segment_ids' input is not sorted."):
        self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentIdsInvalid2(self):
    # Non-monotonic ids.
    shape = [4, 4]
    with self.cached_session():
      tf_x, _ = self._input(shape)
      indices = [0, 1, 0, 1]
      s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError("segment ids are not increasing"):
        self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentIdsInvalid3(self):
    # Ids decrease after increasing.
    shape = [4, 4]
    with self.cached_session():
      tf_x, _ = self._input(shape)
      indices = [0, 1, 2, 0]
      s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
      with self.assertRaisesOpError(
          r"Segment id 1 out of range \[0, 1\), possibly "
          "because 'segment_ids' input is not sorted."):
        self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentIdsInvalid4(self):
    # Trailing -1 id.
    shape = [4, 4]
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
        indices = [0, 0, 0, -1]
        s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
        with self.assertRaisesOpError("segment ids must be >= 0"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentIdsInvalid5(self):
    # Trailing id below -1.
    shape = [4, 4]
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
        indices = [0, 0, 0, -2]
        s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
        with self.assertRaisesOpError("segment ids must be >= 0"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradient(self):
    # Compare symbolic vs. numeric Jacobians for each differentiable op.
    shape = [4, 4]
    indices = [0, 1, 2, 2]
    for tf_op in [
        math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
        math_ops.segment_max
    ]:
      with self.cached_session():
        tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
        s = tf_op(data=tf_x, segment_ids=indices)
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            tf_x,
            shape,
            s, [3, 4],
            x_init_value=np_x.astype(np.double),
            delta=1)
        self.assertAllClose(jacob_t, jacob_n)

  def testDataInvalid(self):
    # Test case for GitHub issue 40653.
    # Scalar (rank-0) data must be rejected.
    for use_gpu in [True, False]:
      with self.cached_session(use_gpu=use_gpu):
        with self.assertRaisesRegex(
            (ValueError, errors_impl.InvalidArgumentError),
            "must be at least rank 1"):
          s = math_ops.segment_mean(
              data=np.uint16(10), segment_ids=np.array([]).astype("int64"))
          self.evaluate(s)


class UnsortedSegmentTest(SegmentReductionHelper):
  """Tests for the unsorted_segment_* ops (ids may be unsorted/negative)."""

  def __init__(self, methodName='runTest'):
    # Each item is np_op1, np_op2, tf_op, initial_value functor
    self.ops_list = [(np.add, None,
                      math_ops.unsorted_segment_sum, lambda t: 0),
                     (self._mean_cum_op, self._mean_reduce_op,
                      math_ops.unsorted_segment_mean, lambda t: 0),
                     (self._mean_cum_op, self._sqrt_n_reduce_op,
                      math_ops.unsorted_segment_sqrt_n, lambda t: 0),
                     (np.ndarray.__mul__, None,
                      math_ops.unsorted_segment_prod, lambda t: 1),
                     (np.minimum, None,
                      math_ops.unsorted_segment_min, lambda t: t.max),
                     (np.maximum, None,
                      math_ops.unsorted_segment_max, lambda t: t.min)]

    # A subset of ops has been enabled for complex numbers
    self.complex_ops_list = [(np.add, None,
                              math_ops.unsorted_segment_sum, lambda t: 0),
                             (np.ndarray.__mul__, None,
                              math_ops.unsorted_segment_prod, lambda t: 1)]
    self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,
                                  dtypes_lib.float64]
    self.all_dtypes = (self.differentiable_dtypes +
                       [dtypes_lib.bfloat16,
                        dtypes_lib.int64, dtypes_lib.int32,
                        dtypes_lib.complex64, dtypes_lib.complex128])
    super(UnsortedSegmentTest, self).__init__(methodName=methodName)

  def testValues(self):
    indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
    num_segments = 12
    for indices in indices_flat, indices_flat.reshape(5, 2):
      shape = indices.shape + (2,)
      for dtype in self.all_dtypes:
        ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
        tf_x, np_x = self._input(shape, dtype=dtype)
        for use_gpu in [True, False]:
          with self.cached_session():
            for np_op1, np_op2, tf_op, init_op in ops_list:
              # sqrt_n doesn't support integers
              if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
                continue
              # todo(philjd): enable this test once real_div supports
              # bfloat16
              if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op]
                  and dtype == dtypes_lib.bfloat16):
                continue
              np_ans = self._segmentReduce(
                  indices, np_x, np_op1, np_op2, num_segments=num_segments,
                  initial_value=init_op(dtype))
              s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)
              tf_ans = self.evaluate(s)
              if dtype is dtypes_lib.bfloat16:
                tf_ans = tf_ans.astype(np.float32)
              self.assertAllCloseAccordingToType(np_ans, tf_ans)
              self.assertShapeEqual(np_ans, s)

  def testNumSegmentsTypes(self):
    # num_segments may be given as either an int32 or int64 tensor.
    dtypes = [dtypes_lib.int32, dtypes_lib.int64]
    indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
    num_segments = 12
    for indices in indices_flat, indices_flat.reshape(5, 2):
      shape = indices.shape + (2,)
      for dtype in dtypes:
        with self.cached_session():
          tf_x, np_x = self._input(shape)
          num_segments_constant = constant_op.constant(
              num_segments, dtype=dtype)
          np_ans = self._segmentReduce(
              indices, np_x, np.add, op2=None, num_segments=num_segments)
          s = math_ops.unsorted_segment_sum(
              data=tf_x,
              segment_ids=indices,
              num_segments=num_segments_constant)
          tf_ans = self.evaluate(s)
        self.assertAllClose(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, s)

  @test_util.run_deprecated_v1
  def testGradientsTFGradients(self):
    num_cols = 2
    indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
    num_segments = max(indices_flat) + 3
    for dtype in self.differentiable_dtypes:
      ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
      for indices in indices_flat, indices_flat.reshape(5, 2):
        shape = indices.shape + (num_cols,)
        # test CPU and GPU as tf.gather behaves differently on each device
        for use_gpu in [False, True]:
          with self.cached_session(use_gpu=use_gpu):
            for _, _, tf_op, _ in ops_list:
              tf_x, np_x = self._input(shape, dtype=dtype)
              s = tf_op(tf_x, indices, num_segments)
              jacob_t, jacob_n = gradient_checker.compute_gradient(
                  tf_x,
                  shape,
                  s, [num_segments, num_cols],
                  x_init_value=np_x,
                  delta=1.)
              self.assertAllCloseAccordingToType(jacob_t, jacob_n,
                                                 half_atol=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testGradientsGradientTape(self):
    num_cols = 2
    indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
    num_segments = max(indices_flat) + 3
    for dtype in self.differentiable_dtypes:
      ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
      for indices in indices_flat, indices_flat.reshape(5, 2):
        shape = indices.shape + (num_cols,)
        # test CPU and GPU as tf.gather behaves differently on each device
        for use_gpu in [test_util.use_gpu, test_util.force_cpu]:
          with use_gpu():
            for _, _, tf_op, _ in ops_list:
              _, np_x = self._input(shape, dtype=dtype)

              # pylint: disable=cell-var-from-loop
              def f(x):
                return tf_op(x, indices, num_segments)

              gradient_tape_jacob_t, jacob_n = (
                  gradient_checker_v2.compute_gradient(f, [np_x], delta=1.))
              # pylint: enable=cell-var-from-loop
              self.assertAllCloseAccordingToType(jacob_n,
                                                 gradient_tape_jacob_t,
                                                 half_atol=1e-2)

  @test_util.run_deprecated_v1
  def testProdGrad(self):
    # additional test for the prod gradient to ensure correct handling
    # of zeros
    values = np.array([0, 0, 1, 0, 2, 2, 3, 3, 3], dtype=np.float32)
    indices = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
    indices_neg = np.array([-1, 0, 0, -1, 1, 1, -1, 2, 2], dtype=np.int32)
    values_tf = constant_op.constant(values)
    # ground truth partial derivatives
    gradients_indices = np.zeros((9, 3), dtype=np.float32)
    gradients_indices_neg = np.zeros((9, 3), dtype=np.float32)
    # the derivative w.r.t. to the other segments is zero, so here we only
    # explicitly set the grad values for the corresponding segment
    gradients_indices[range(9), indices] = [0, 0, 0, 4, 0, 0, 9, 9, 9]
    gradients_indices_neg[range(9), indices_neg] = [0, 1, 0, 0, 2, 2, 0, 3, 3]
    for use_gpu in [False, True]:
      with self.cached_session(use_gpu=use_gpu):
        for ind, grad_gt in [(indices, gradients_indices),
                             (indices_neg, gradients_indices_neg)]:
          s = math_ops.unsorted_segment_prod(values_tf,
                                             constant_op.constant(ind), 3)
          jacob_t, jacob_n = gradient_checker.compute_gradient(
              values_tf, (9,), s, (3,), x_init_value=values, delta=1)
          self.assertAllClose(jacob_t, jacob_n)
          self.assertAllClose(jacob_t, grad_gt)

  @test_util.run_deprecated_v1
  def testGradientMatchesSegmentSum(self):
    # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
    # and compare the outputs, which should be identical.
    # NB: for this test to work, indices must be valid for SegmentSum, namely
    # it must be sorted, the indices must be contiguous, and num_segments
    # must be max(indices) + 1.
    indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
    n = len(indices)
    num_cols = 2
    shape = [n, num_cols]
    num_segments = max(indices) + 1
    for dtype in self.differentiable_dtypes:
      with self.cached_session():
        tf_x, np_x = self._input(shape, dtype=dtype)
        # Results from UnsortedSegmentSum
        unsorted_s = math_ops.unsorted_segment_sum(
            data=tf_x, segment_ids=indices, num_segments=num_segments)
        unsorted_jacob_t, unsorted_jacob_n = (
            gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
                                              [num_segments, num_cols],
                                              x_init_value=np_x, delta=1))

        # Results from SegmentSum
        sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
        sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
            tf_x,
            shape,
            sorted_s, [num_segments, num_cols],
            x_init_value=np_x,
            delta=1)
      self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
      self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)

  @test_util.run_deprecated_v1
  def testBadIndices(self):
    # Note: GPU kernel does not return the out-of-range error needed for this
    # test, so this test is marked as cpu-only.
    # Note: With PR #13055 a negative index will be ignored silently.
    with self.session(use_gpu=False):
      for bad in [[2]], [[7]]:
        unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
        with self.assertRaisesOpError(
            r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
          self.evaluate(unsorted)

  @test_util.run_deprecated_v1
  def testEmptySecondDimension(self):
    # Zero-width data should produce a zero-width, well-shaped result.
    dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
              np.complex64, np.complex128]
    with self.session():
      for dtype in dtypes:
        for itype in (np.int32, np.int64):
          data = np.zeros((2, 0), dtype=dtype)
          segment_ids = np.array([0, 1], dtype=itype)
          unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
          self.assertAllEqual(unsorted, np.zeros((2, 0), dtype=dtype))

  def testDropNegatives(self):
    # Note: the test is done by replacing segment_ids with 8 to -1
    # for index and replace values generated by numpy with 0.
    indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
    num_segments = 12
    for indices in indices_flat, indices_flat.reshape(5, 2):
      shape = indices.shape + (2,)
      for dtype in self.all_dtypes:
        with self.session():
          tf_x, np_x = self._input(shape, dtype=dtype)
          np_ans = self._segmentReduce(
              indices, np_x, np.add, op2=None, num_segments=num_segments)
          # Replace np_ans[8] with 0 for the value
          np_ans[8:] = 0
          # Replace 8 with -1 in indices
          np.place(indices, indices == 8, [-1])
          s = math_ops.unsorted_segment_sum(
              data=tf_x, segment_ids=indices, num_segments=num_segments)
          tf_ans = self.evaluate(s)
        self.assertAllClose(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, s)


class SparseSegmentReductionHelper(SegmentReductionHelper):
  """Fixtures for sparse_segment_* ops: gathered rows plus segment ids."""

  def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
    """Returns (tf_indices, np_indices, tf_values, np_values) with random
    row indices into an input of `input_shape`."""
    a, b = super(SparseSegmentReductionHelper, self)._input(input_shape,
                                                           dtype)
    indices = np.random.randint(0, input_shape[0], num_indices).astype(
        np.int32)
    return (constant_op.constant(
        indices, dtype=dtypes_lib.int32), indices, a, b)

  def _sparseSegmentReduce(self,
                           x,
                           indices,
                           segment_indices,
                           op1,
                           op2=None,
                           num_segments=None):
    """NumPy reference: gather rows of `x` by `indices`, then segment-reduce."""
    return self._segmentReduce(
        segment_indices, x[indices], op1, op2, num_segments=num_segments)

  def _sparseSegmentReduceGrad(self, ygrad, indices, segment_ids, output_dim0,
                               mode):
    """NumPy reference gradient: scatter `ygrad` back through the gather,
    weighting by 1/count ("mean") or 1/sqrt(count) ("sqrtn")."""
    assert mode in ("sum", "mean", "sqrtn")
    if mode != "sum":
      weights = np.zeros(ygrad.shape[0], ygrad.dtype)
      for segment in segment_ids:
        weights[segment] += 1
      weights = 1. / weights if mode == "mean" else 1. / np.sqrt(weights)
    xgrad = np.zeros([output_dim0, ygrad.shape[1]], ygrad.dtype)
    for segment, index in zip(segment_ids, indices):
      if mode == "sum":
        xgrad[index] += ygrad[segment]
      else:
        xgrad[index] += ygrad[segment] * weights[segment]
    return xgrad


class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
  """Tests for sparse_segment_{sum,mean,sqrt_n} and their gradients."""

  def testValues(self):
    dtypes = [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
        dtypes_lib.int32
    ]
    index_dtypes = [dtypes_lib.int32, dtypes_lib.int64]
    segment_ids_dtypes = [dtypes_lib.int32, dtypes_lib.int64]
    mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]

    # Each item is np_op1, np_op2, tf_op
    ops_list = [(np.add, None, math_ops.sparse_segment_sum),
                (self._mean_cum_op, self._mean_reduce_op,
                 math_ops.sparse_segment_mean)]

    n = 400
    # Note that the GPU implem has different paths for different inner sizes.
    for inner_size in [1, 2, 3, 32]:
      shape = [n, inner_size]
      # Segment i gets i + 1 entries: [0, 1, 1, 2, 2, 2, ...].
      segment_indices = []
      for i in range(20):
        for _ in range(i + 1):
          segment_indices.append(i)
      num_indices = len(segment_indices)
      for dtype in dtypes:
        for index_dtype in index_dtypes:
          for segment_ids_dtype in segment_ids_dtypes:
            with self.cached_session():
              tf_indices, np_indices, tf_x, np_x = self._sparse_input(
                  shape, num_indices, dtype=dtype)
              for np_op1, np_op2, tf_op in ops_list:
                if (tf_op == math_ops.sparse_segment_mean and
                    dtype not in mean_dtypes):
                  continue
                np_ans = self._sparseSegmentReduce(np_x, np_indices,
                                                   segment_indices, np_op1,
                                                   np_op2)
                s = tf_op(
                    data=tf_x,
                    indices=math_ops.cast(tf_indices, index_dtype),
                    segment_ids=math_ops.cast(segment_indices,
                                              segment_ids_dtype))
                tf_ans = self.evaluate(s)
                self.assertAllClose(np_ans, tf_ans)
                # NOTE(mrry): The static shape inference that computes
                # `tf_ans.shape` can only infer that sizes from dimension 1
                # onwards, because the size of dimension 0 is data-dependent
                # and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:]) def testSegmentIdsHole(self): tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [(np.add, None, math_ops.sparse_segment_sum), ( self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)] segment_indices = [0, 2, 2, 2] tf_indices = [8, 3, 0, 9] with self.session(): for np_op1, np_op2, tf_op in ops_list: np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices, np_op1, np_op2) s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) tf_ans = self.evaluate(s) self.assertAllClose(np_ans, tf_ans) def testWithNumSegments(self): tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [(np.add, None, math_ops.sparse_segment_sum_with_num_segments), (self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean_with_num_segments)] segment_indices = [0, 2, 2, 2] tf_indices = [8, 3, 0, 9] num_segments = 5 with self.session(): for np_op1, np_op2, tf_op in ops_list: np_ans = self._sparseSegmentReduce( np_x, tf_indices, segment_indices, np_op1, np_op2, num_segments=num_segments) s = tf_op( data=tf_x, indices=tf_indices, segment_ids=segment_indices, num_segments=num_segments) tf_ans = self.evaluate(s) self.assertAllClose(np_ans, tf_ans) def testWithEmptySegments(self): tf_x = constant_op.constant([], shape=[0, 4], dtype=dtypes_lib.float32) ops_list = [ math_ops.sparse_segment_sum_with_num_segments, math_ops.sparse_segment_mean_with_num_segments ] segment_indices = [] tf_indices = [] num_segments = 5 with self.session(): for tf_op in ops_list: s = tf_op( data=tf_x, indices=tf_indices, segment_ids=segment_indices, num_segments=num_segments) tf_ans = self.evaluate(s) self.assertAllClose(np.zeros([5, 4]), tf_ans) @test_util.run_in_graph_and_eager_modes def testSegmentScalarIdiRaisesInvalidArgumentError(self): """Test for github #46897.""" ops_list = [ math_ops.sparse_segment_sum, math_ops.sparse_segment_mean, math_ops.sparse_segment_sqrt_n, ] for op 
in ops_list: with self.assertRaisesRegex( (ValueError, errors_impl.InvalidArgumentError), "Shape must be at least rank 1"): op(data=1.0, indices=[0], segment_ids=[3]) def testSegmentIdsGreaterThanZero(self): tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [(np.add, None, math_ops.sparse_segment_sum), ( self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)] segment_indices = [1, 2, 2, 2] tf_indices = [8, 3, 0, 9] with self.session(): for np_op1, np_op2, tf_op in ops_list: np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices, np_op1, np_op2) s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) tf_ans = self.evaluate(s) self.assertAllClose(np_ans, tf_ans) def testValid(self): # Baseline for the test*Invalid* methods below. tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, 0, 9] with self.session(): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) self.evaluate(s) @test_util.run_deprecated_v1 def testIndicesInvalid1(self): tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [0, 1, 2, 2] tf_indices = [8, -1, 0, 9] with self.session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( r"indices\[1\] == -1 out of range \[0, 10\)"): self.evaluate(s) @test_util.run_deprecated_v1 def testIndicesInvalid2(self): tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, 0, 10] with self.session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( 
r"indices\[3\] == 10 out of range \[0, 10\)"): self.evaluate(s) @test_util.run_deprecated_v1 def testSegmentsInvalid2(self): tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [0, 1, 0, 1] tf_indices = [8, 3, 0, 9] with self.session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError("segment ids are not increasing"): self.evaluate(s) @test_util.run_deprecated_v1 def testSegmentsInvalid3(self): tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [0, 1, 2, 0] tf_indices = [8, 3, 0, 9] with self.session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( r"Segment id 1 out of range \[0, 1\), possibly because " "'segment_ids' input is not sorted"): self.evaluate(s) @test_util.run_deprecated_v1 def testSegmentsInvalid4(self): tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [-1, 0, 1, 1] tf_indices = [8, 3, 0, 9] with self.session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( r"Segment id -1 out of range \[0, 2\), possibly because " "'segment_ids' input is not sorted"): self.evaluate(s) @test_util.run_deprecated_v1 def testSegmentsInvalid6(self): tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32) ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean] segment_indices = [0, 0, 0, -1] tf_indices = [8, 3, 0, 9] with self.session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError("segment ids must be >= 0"): 
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentsInvalid7(self):
    """Negative segment ids are rejected when the op is evaluated."""
    tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
    ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
    segment_indices = [0, 0, 0, -2]  # -2 is out of range
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
        with self.assertRaisesOpError("segment ids must be >= 0"):
          self.evaluate(s)

  def testSegmentWithNumSegmentsValid(self):
    """Well-formed inputs evaluate cleanly with an explicit num_segments."""
    # Baseline for the test*WithNumSegmentsInvalid* methods below.
    tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_with_num_segments,
        math_ops.sparse_segment_mean_with_num_segments,
    ]
    num_segments = 5
    segment_indices = [0, 1, 3, 3]
    tf_indices = [8, 3, 0, 9]
    with self.session():
      for tf_op in ops_list:
        s = tf_op(
            data=tf_x,
            indices=tf_indices,
            segment_ids=segment_indices,
            num_segments=num_segments)
        self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentWithNumSegmentsInvalid1(self):
    """A segment id equal to num_segments must fail at run time."""
    tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_with_num_segments,
        math_ops.sparse_segment_mean_with_num_segments,
    ]
    num_segments = 5
    segment_indices = [0, 1, 3, 5]  # 5 == num_segments, so out of range
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(
            data=tf_x,
            indices=tf_indices,
            segment_ids=segment_indices,
            num_segments=num_segments)
        with self.assertRaisesOpError("segment ids must be < num_segments"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testSegmentWithNumSegmentsInvalid2(self):
    """A negative num_segments is rejected at graph-construction time."""
    tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_with_num_segments,
        math_ops.sparse_segment_mean_with_num_segments,
    ]
    num_segments = -2
    segment_indices = [0, 1, 3, 3]
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        # Note: the error is raised while *building* the op, not on evaluate.
        with self.assertRaisesRegex(
            ValueError, "Cannot specify a negative value for num_segments"):
          tf_op(
              data=tf_x,
              indices=tf_indices,
              segment_ids=segment_indices,
              num_segments=num_segments)

  @test_util.run_deprecated_v1
  def testGradient(self):
    """Numeric vs. symbolic gradient check for sum/mean sparse segment ops."""
    shape = [10, 4]
    segment_indices = [0, 1, 2, 2]
    num_indices = len(segment_indices)
    for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
      with self.cached_session():
        tf_indices, _, tf_x, np_x = self._sparse_input(
            shape, num_indices, dtype=dtypes_lib.float64)
        s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            tf_x,
            shape,
            s, [3, 4],
            x_init_value=np_x.astype(np.double),
            delta=1)
        self.assertAllClose(jacob_t, jacob_n)

  @test_util.run_deprecated_v1
  def testGradientWithEmptySegmentsAtEnd(self):
    """Gradient check when num_segments leaves trailing empty segments."""
    shape = [10, 4]
    num_segments = 5
    segment_indices = [0, 1, 2, 2]  # segments 3 and 4 stay empty
    num_indices = len(segment_indices)
    for tf_op in [
        math_ops.sparse_segment_sum_with_num_segments,
        math_ops.sparse_segment_mean_with_num_segments,
    ]:
      with self.cached_session():
        tf_indices, _, tf_x, np_x = self._sparse_input(
            shape, num_indices, dtype=dtypes_lib.float64)
        s = tf_op(
            data=tf_x,
            indices=tf_indices,
            segment_ids=segment_indices,
            num_segments=num_segments)
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            tf_x,
            shape,
            s, [5, 4],
            x_init_value=np_x.astype(np.double),
            delta=1)
        self.assertAllClose(jacob_t, jacob_n)

  def testGradientExplicit(self):
    """Explicit *_grad ops match the NumPy reference gradient."""
    # Note that the GPU implem has different paths for different inner sizes.
    for inner_size in (1, 2, 3, 32):
      with self.session():
        tf_ygrad, np_ygrad = self._input([3, inner_size],
                                         dtype=dtypes_lib.float32)
        segment_ids = [0, 1, 2, 2, 2]
        indices = [8, 3, 0, 9, 3]
        output_dim0 = 10
        ops_list = [
            (math_ops.sparse_segment_sum_grad, "sum"),
            (math_ops.sparse_segment_mean_grad, "mean"),
            (math_ops.sparse_segment_sqrt_n_grad, "sqrtn"),
        ]
        for tf_op, mode in ops_list:
          np_xgrad = self._sparseSegmentReduceGrad(np_ygrad, indices,
                                                   segment_ids, output_dim0,
                                                   mode)
          tf_xgrad = tf_op(tf_ygrad, indices, segment_ids, output_dim0)
          self.assertAllClose(tf_xgrad, np_xgrad)

  def testGradientExplicitSingleOutput(self):
    """Explicit *_grad ops when every index maps to the single output row."""
    # The GPU implem has a special case when there is a single output.
    for inner_size in (1, 2, 3, 32):
      with self.session():
        tf_ygrad, np_ygrad = self._input([3, inner_size],
                                         dtype=dtypes_lib.float32)
        segment_ids = [0, 1, 2, 2, 2]
        indices = [0, 0, 0, 0, 0]
        output_dim0 = 1
        ops_list = [
            (math_ops.sparse_segment_sum_grad, "sum"),
            (math_ops.sparse_segment_mean_grad, "mean"),
            (math_ops.sparse_segment_sqrt_n_grad, "sqrtn"),
        ]
        for tf_op, mode in ops_list:
          np_xgrad = self._sparseSegmentReduceGrad(np_ygrad, indices,
                                                   segment_ids, output_dim0,
                                                   mode)
          tf_xgrad = tf_op(tf_ygrad, indices, segment_ids, output_dim0)
          self.assertAllClose(tf_xgrad, np_xgrad)

  def testGradientValid(self):
    """Well-formed inputs to the *_grad ops evaluate cleanly."""
    # Baseline for the testGradient*Invalid* methods below.
    tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [0, 1, 2, 2]
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradientIndicesInvalid1(self):
    """An index equal to output_dim0 is out of range for the grad ops."""
    tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [0, 1, 2, 2]
    tf_indices = [8, 3, 0, 10]  # 10 == output_dim0, so out of range
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradientIndicesInvalid2(self):
    """A negative index is out of range for the grad ops."""
    tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [0, 1, 2, 2]
    tf_indices = [8, 3, -1, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradientSegmentsInvalid1(self):
    """Segment count implied by segment_ids must match the data's dim 0."""
    tf_x, _ = self._input(
        [3, 4], dtype=dtypes_lib.float32)  # expecting 3 segments
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [0, 1, 1, 4]  # 5 segments
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        with self.assertRaisesOpError("Invalid number of segments"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradientSegmentsInvalid2(self):
    """A segment id >= the data's dim 0 must be rejected."""
    tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [0, 1, 2, 0]
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradientSegmentsInvalid3(self):
    """A negative segment id must be rejected."""
    tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [-1, 0, 1, 1]
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
          self.evaluate(s)

  @test_util.run_deprecated_v1
  def testGradientSegmentsInvalid4(self):
    """With zero data rows, every segment id is out of range."""
    tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
    ops_list = [
        math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
        math_ops.sparse_segment_sqrt_n_grad
    ]
    segment_indices = [0, 1, 2, -1]
    tf_indices = [8, 3, 0, 9]
    with self.session(use_gpu=False):
      for tf_op in ops_list:
        s = tf_op(tf_x, tf_indices, segment_indices, 10)
        with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
          self.evaluate(s)


class SegmentReductionOpBenchmark(test.Benchmark):
  """GPU wall-time benchmark for sorted vs. unsorted segment_sum."""

  # Benchmark grid: every combination of these is run (itertools.product).
  outer_dim_options = [2**x for x in range(9, 14, 2)]
  ratio_options = [2**x for x in range(1, 6, 2)]
  inner_dim_options = [2**x for x in range(9, 14, 2)]
  # randomly generated sizes with less alignments
  inner_dim_options += [
      1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
  ]
  dtype_options = [np.float32, np.float64]
  options = (outer_dim_options, ratio_options, inner_dim_options,
             dtype_options)
  # Each functor returns (label, op); "sorted" uses segment_sum, "unsorted"
  # uses unsorted_segment_sum with num_segments derived from the last id.
  # pylint: disable=g-long-lambda
  op_functors = [lambda vc, vs, seg_ids: ("sorted",
                                          math_ops.segment_sum(vc, vs)),
                 lambda vc, vs, seg_ids: ("unsorted",
                                          math_ops.unsorted_segment_sum(
                                              vc, vs, seg_ids[-1]+1))]
  # pylint: enable=g-long-lambda
  repeat = 10  # min_iters for run_op_benchmark

  def _npTypeToStr(self, t):
    # Short dtype label used in the benchmark name.
    if t == np.float32:
      return "fp32"
    if t == np.float64:
      return "fp64"

  def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
    """Builds one benchmark graph on /gpu:0 and reports its wall time."""
    output_outer_dim = int(outer_dim / ratio)
    const = np.random.randint(5, size=(outer_dim, inner_dim))
    # Sorted ids are required by segment_sum and fine for the unsorted op.
    seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
    vs = variables.Variable(seg_ids.astype(np.int32))
    with ops.device("/gpu:0"):
      vc = variables.Variable(const.astype(dtype))
    name, op = op_functor(vc, vs, seg_ids)
    with session.Session() as sess:
      self.evaluate(variables.global_variables_initializer())
      r = self.run_op_benchmark(
          sess,
          op,
          min_iters=self.repeat,
          name="_".join(
              map(str, [name, outer_dim, ratio, inner_dim,
                        self._npTypeToStr(dtype)])))
    return name, r["wall_time"]

  def benchmarkSegmentSumGPU(self):
    """Benchmarks the sorted segment_sum variant over the full grid."""
    if not test.is_gpu_available(cuda_only=True):
      return
    for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
      op_functor = self.op_functors[0]
      with ops.Graph().as_default():
        self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)

  def benchmarkUnsortedSegmentSumGPU(self):
    """Benchmarks the unsorted_segment_sum variant over the full grid."""
    if not test.is_gpu_available(cuda_only=True):
      return
    for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
      op_functor = self.op_functors[1]
      with ops.Graph().as_default():
        self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)


if __name__ == "__main__":
  test.main()
# -*- coding: utf-8 -*-
"""
    tipfyext.jinja2
    ~~~~~~~~~~~~~~~

    Jinja2 template support for Tipfy.

    Learn more about Jinja2 at http://jinja.pocoo.org/2/

    :copyright: 2011 by tipfy.org.
    :license: BSD, see LICENSE.txt for more details.
"""
import blinker

from jinja2 import Environment, FileSystemLoader, ModuleLoader
from werkzeug import cached_property, import_string

from tipfy.local import get_request
from tipfy.routing import url_for

#: Default configuration values for this module. Keys are:
#:
#: templates_dir
#:     Directory for templates. Default is `templates`.
#:
#: templates_compiled_target
#:     Target for compiled templates. If set, uses the loader for compiled
#:     templates in production. If it ends with a '.zip' it will be treated
#:     as a zip file. Default is None.
#:
#: force_use_compiled
#:     Forces the use of compiled templates even in the development server.
#:
#: environment_args
#:     Keyword arguments used to instantiate the Jinja2 environment. By
#:     default autoescaping is enabled and two extensions are set:
#:     'jinja2.ext.autoescape' and 'jinja2.ext.with_'. For production it may
#:     be a good idea to set 'auto_reload' to False -- we don't need to check
#:     if templates changed after deployed.
#:
#: after_environment_created
#:     [DEPRECATED: use the environment_created hook instead]
#:     A function called after the environment is created. Can also be defined
#:     as a string to be imported dynamically. Use this to set extra filters,
#:     global variables, extensions etc. It is called passing the environment
#:     as argument.
default_config = {
    'templates_dir': 'templates',
    'templates_compiled_target': None,
    'force_use_compiled': False,
    'environment_args': {
        'autoescape': True,
        'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_'],
    },
    'after_environment_created': None,
}


class Jinja2(object):
    """Wraps a configured ``jinja2.Environment`` for a Tipfy app.

    The environment is built once in ``__init__`` from the app's config
    (see ``default_config``) and stored on ``self.environment``.
    """

    def __init__(self, app, _globals=None, filters=None):
        """Builds the Jinja2 environment.

        :param app: The Tipfy application; its config for this module drives
            loader selection, i18n and the deprecated creation hook.
        :param _globals: Optional dict merged into ``environment.globals``.
        :param filters: Optional dict merged into ``environment.filters``.
        """
        self.app = app
        config = app.config[__name__]
        kwargs = config['environment_args'].copy()
        enable_i18n = 'jinja2.ext.i18n' in kwargs.get('extensions', [])

        if not kwargs.get('loader'):
            templates_compiled_target = config['templates_compiled_target']
            use_compiled = not app.debug or config['force_use_compiled']

            if templates_compiled_target and use_compiled:
                # Use precompiled templates loaded from a module or zip.
                kwargs['loader'] = ModuleLoader(templates_compiled_target)
            else:
                # Parse templates for every new environment instances.
                kwargs['loader'] = FileSystemLoader(config['templates_dir'])

        # Initialize the environment.
        env = Environment(**kwargs)

        if _globals:
            env.globals.update(_globals)

        if filters:
            env.filters.update(filters)

        if enable_i18n:
            # Install i18n.
            from tipfy import i18n
            # Translations are resolved per-request via the request-local
            # i18n store; newstyle=True uses Jinja2's new-style gettext.
            env.install_gettext_callables(
                lambda x: get_request().i18n.translations.ugettext(x),
                lambda s, p, n: get_request().i18n.translations.ungettext(s,
                    p, n),
                newstyle=True)
            format_functions = {
                'format_date': i18n.format_date,
                'format_time': i18n.format_time,
                'format_datetime': i18n.format_datetime,
                'format_timedelta': i18n.format_timedelta,
            }
            # Exposed both as globals and as filters.
            env.globals.update(format_functions)
            env.filters.update(format_functions)

        env.globals['url_for'] = url_for

        # Deprecated hook: may be a callable or a dotted import string.
        # (Python 2 module: `basestring` is intentional here.)
        after_creation_func = config['after_environment_created']
        if after_creation_func:
            if isinstance(after_creation_func, basestring):
                after_creation_func = import_string(after_creation_func)

            after_creation_func(env)

        environment_created.send(self, environment=env)
        self.environment = env

    def render(self, _filename, **context):
        """Renders a template and returns the rendered string.

        Emits the ``template_rendered`` signal with the result.

        :param _filename:
            The template filename, related to the templates directory.
        :param context:
            Keyword arguments used as variables in the rendered template.
            These will override values set in the request context.
        :returns:
            A rendered template.
        """
        res = self.environment.get_template(_filename).render(**context)
        template_rendered.send(self, template=_filename, context=context,
            result=res)
        return res

    def render_template(self, _handler, _filename, **context):
        """Renders a template using the handler's context as defaults.

        :param _handler:
            The current request handler; its ``context`` seeds the variables.
        :param _filename:
            The template filename, related to the templates directory.
        :param context:
            Keyword arguments used as variables in the rendered template.
            These will override values set in the request context.
        :returns:
            A rendered template.
        """
        ctx = _handler.context.copy()
        ctx.update(context)
        return self.render(_filename, **ctx)

    def render_response(self, _handler, _filename, **context):
        """Returns a response object with a rendered template.

        :param _handler:
            The current request handler.
        :param _filename:
            The template filename, related to the templates directory.
        :param context:
            Keyword arguments used as variables in the rendered template.
            These will override values set in the request context.
        """
        res = self.render_template(_handler, _filename, **context)
        return self.app.response_class(res)

    def get_template_attribute(self, filename, attribute):
        """Loads a macro (or variable) a template exports.  This can be used
        to invoke a macro from within Python code.  If you for example have a
        template named `_foo.html` with the following contents:

        .. sourcecode:: html+jinja

           {% macro hello(name) %}Hello {{ name }}!{% endmacro %}

        You can access this from Python code like this::

            hello = get_template_attribute('_foo.html', 'hello')
            return hello('World')

        This function is borrowed from `Flask`.

        :param filename:
            The template filename.
        :param attribute:
            The name of the variable or macro to access.
        """
        template = self.environment.get_template(filename)
        return getattr(template.module, attribute)

    @classmethod
    def factory(cls, _app, _name, **kwargs):
        # Memoizes one instance per app in the app registry.
        if _name not in _app.registry:
            _app.registry[_name] = cls(_app, **kwargs)

        return _app.registry[_name]


class Jinja2Mixin(object):
    """Mixin that adds ``render_template`` and ``render_response`` methods
    to a :class:`tipfy.RequestHandler`. It will use the request context to
    render templates.
    """
    # The Jinja2 creator.
    jinja2_class = Jinja2

    @cached_property
    def jinja2(self):
        # Lazily fetches the shared Jinja2 instance from the app registry.
        return self.jinja2_class.factory(self.app, 'jinja2')

    def render_template(self, _filename, **context):
        return self.jinja2.render_template(self, _filename, **context)

    def render_response(self, _filename, **context):
        return self.jinja2.render_response(self, _filename, **context)


"""
# Example of using signals.

from tipfyext.jinja2 import environment_created

def setup_environment(jinja2, environment):
    environment.globals.update({
        # ... custom globals ...
    })
    environment.filters.update({
        # ... custom filters ...
    })

environment_created.connect(setup_environment)
"""
# Module-level blinker signals; see the example block above for usage.
_signals = blinker.Namespace()
environment_created = _signals.signal('environment-created')
template_rendered = _signals.signal('template-rendered')
# Restore the girder items and files back into a girder server.
# (Python 2 script: uses cPickle and the `unicode` builtin.)
import sys
import os
import pdb
import girder_client
import json
import cPickle as pickle


def copy_girder_annotation(annotation):
    """Returns a minimal copy of an annotation dict (name only, no elements).

    The element-copying logic below is intentionally disabled (kept in the
    triple-quoted block for reference).
    """
    # make a copy of the annotation because json has trouble with
    # u"strings"
    annot_copy = {"elements":[],"name":str(annotation["name"])}
    '''
    for e in annotation["elements"]:
        if e['type'] != 'view':
            element = {"type":str(e["type"]), \
                       "height":e["height"],"width":e["width"], \
                       "rotation":e["rotation"]}
            if "lineColor" in e:
                element["lineColor"] = str(e["lineColor"])
            if "lineWidth" in e:
                element["lineWidth"] = float(e["lineWidth"])
            #if "scalar" in e:
            #    element["scalar"] = float(e["scalar"])
            element["center"] = [e["center"][0],e["center"][1],0]
            annot_copy["elements"].append(element)
    '''
    return annot_copy


# really to convert unicode to strings.
def copy_item(item):
    """Recursively copies a JSON-like value, ASCII-encoding unicode strings."""
    if type(item) is unicode:
        return item.encode('ascii','ignore')
    elif type(item) is dict:
        return copy_dict(item)
    elif type(item) is list:
        return copy_list(item)
    else:
        return item


# really to convert strings.
def copy_dict(obj):
    """Copies a dict, converting unicode keys and values to byte strings."""
    copy = {}
    for k in obj.keys():
        if type(k) is unicode:
            k = k.encode('ascii','ignore')
        copy[k] = copy_item(obj[k])
    return copy


# really to convert strings.
def copy_list(item_list):
    """Copies a list, converting unicode items to byte strings."""
    copy = []
    for item in item_list:
        copy.append(copy_item(item))
    return copy


def restore_annotation(gc, file_path, target_id):
    """Loads pickled annotations from file_path and writes them to the
    girder item target_id, overwriting same-named annotations."""
    with open(file_path, 'rb') as json_data:
        annots = pickle.load(json_data)
    for annot in annots:
        annot = annot['annot']['annotation']
        if 'elements' in annot and len(annot['elements']) > 0:
            # convert unicode to strings
            #tmp = copy_dict(annot)
            tmp = copy_dict(annot)
            annot_name = tmp['name']
            # if the annotation already exists, overwrite it.
            resp = gc.get('annotation',
                          parameters={'itemId':target_id, 'name':annot_name})
            if len(resp) > 0:
                annot_id = resp[0]['_id']
                gc.put('annotation/%s'%annot_id, json=tmp)
            else:
                gc.post("annotation", parameters={"itemId":target_id},
                        json=tmp)


# dir_path is the disk directory containing all the folders contents.
# item_obj is the girder.json from dir_path.
# target_id is the target girder item to update.
# Precondition: An item with this id must exist in girder.
# id will change, so the output id will be different than the input object.
def restore_item(gc, dir_path, item_obj, target_id):
    """Restores one dumped item: metadata, files and annotations."""
    # restore the folders metadata.
    if 'meta' in item_obj:
        gc.addMetadataToItem(target_id, item_obj['meta'])

    largeImageFileId = ""
    if 'largeImage' in item_obj:
        largeImageFileId = item_obj['largeImage']['fileId']

    # Make a dictionary of preexisting files in the target item so we can
    # overwrite them (they will not get duplicated when restoring more
    # than once).
    files_resp = gc.get('item/%s/files'%target_id)
    file_dict = {}
    for file_obj in files_resp:
        file_dict[file_obj['name']] = file_obj['_id']

    # handle the files and annotation
    for o in os.listdir(dir_path):
        file_path = os.path.join(dir_path, o)
        if os.path.isdir(file_path):
            # item directories have no sub directories
            continue
        if o == "annotation.pickle":
            restore_annotation(gc, file_path, target_id)
        elif o != 'girder.json':
            # TODO: pass in a stomp flag. (Or compare length)
            # Stomp on any existing file with the same name.
            #if o in file_dict:
            #    gd.delete('file/%s'%file_dict[o])
            # upload a file to the item
            # NOTE(review): existing files are skipped, not overwritten,
            # despite the comment above.
            if not o in file_dict:
                gc.uploadFileToItem(target_id, file_path)

    # Check and set large image activation.
    # TODO: point to a specific file id (need to modify dump because all we
    # have are names.
    # TODO: This fails when auto large image is on. Check to see if it is
    # already a large image.
    #if 'largeImage' in item_obj:
    #    gc.post('item/%s/tiles'%target_id, parameters={'notify': 'false'})


# dir_path is the disk directory containing all the folders contents.
# folder_obj is the girder.json from dir_path.
# target_id is the target girder folder to update.
# id will change, so the output id will be different than the input object.
def restore_folder(gc, dir_path, folder_obj, target_id):
    """Restores one dumped folder: metadata, then recurses into children."""
    # restore the folders metadata.
    if 'meta' in folder_obj:
        gc.addMetadataToFolder(target_id, folder_obj['meta'])
    # handle the subdirectories
    for o in os.listdir(dir_path):
        subpath = os.path.join(dir_path, o)
        if os.path.isdir(subpath):
            restore(gc,subpath, target_id)


# Directories can be a folder or an item.
# We do not know which until we read the girder.json file.
def restore(gc, dir_path, parent_folder_id):
    """Restores a dumped directory as a girder item or folder, reusing an
    existing child of parent_folder_id with the same name when present."""
    # first read in the girder object that will tell us whether this directory
    # is from a girder folder or item.
    obj_path = os.path.join(dir_path, 'girder.json')
    if not os.path.isfile(obj_path):
        print("%s json missing"%dir_path)
        return
    with open(obj_path, 'r') as f:
        obj = json.load(f)
    if not 'name' in obj or not '_modelType' in obj:
        print("%s json error"%dir_path)
        return
    name = obj['name']
    if obj['_modelType'] == 'item':
        print('item %s'%name)
        # If an item with this name already exists, use it.
        resp = gc.get('item',
                      parameters={'folderId':parent_folder_id,'name':name})
        if len(resp) > 0 :
            item_id = resp[0]['_id']
        else:
            item = gc.createItem(parent_folder_id, name, obj['description'])
            item_id = item['_id']
        restore_item(gc, dir_path, obj, item_id)
    elif obj['_modelType'] == 'folder':
        print('folder %s'%name)
        # If a folder with this name already exists, use it.
        resp = gc.get('folder', parameters={'parentId':parent_folder_id, \
                                            'name':name,
                                            'parentType':'folder'})
        if len(resp) > 0:
            folder_id = resp[0]['_id']
        else:
            folder = gc.createFolder(parent_folder_id, name,
                                     obj['description'])
            folder_id = folder['_id']
        restore_folder(gc, dir_path, obj, folder_id)


def print_usage():
    """Prints command-line usage."""
    print("usage:")
    print("python %s serverName in_path, folder_id"%sys.argv[0])


if __name__ == '__main__':
    # Known servers; API keys are intentionally blank here.
    keys = {'lemon':'', \
            'wsi2': ''}
    urls = {'lemon':'http://lemon/api/v1', \
            'wsi2': 'http://wsi2.slide-atlas.org:8080/api/v1'}

    if len(sys.argv) != 4:
        print_usage()
        exit()
    server_name = sys.argv[1]
    in_path = sys.argv[2]
    in_path = os.path.realpath(in_path)
    containing_folder_id = sys.argv[3]

    if not server_name in keys:
        print("Unknown server %s"%server_name)
        exit()

    gc = girder_client.GirderClient(apiUrl=urls[server_name])
    gc.authenticate('law12019', apiKey=keys[server_name])

    # This is not symmetric with dump.
    # directory arg is the parent of the dumped folder.
    for o in os.listdir(in_path):
        subpath = os.path.join(in_path, o)
        restore(gc, subpath, containing_folder_id)
from proto_rogue_base import *

# Tile, Creature, Item, MapBase, Rect, libtcod, messenger and the MAP_* /
# ROOM_* / MAX_* constants all come from proto_rogue_base (star import).


class Wall(Tile):
    # Blocking tile drawn as '#'.
    def __init__(self):
        Tile.__init__(self, '#', libtcod.light_gray, True)

class Floor(Tile):
    # Walkable tile drawn as blank.
    def __init__(self):
        Tile.__init__(self, ' ', libtcod.yellow, False)

class Water(Tile):
    # Blocks movement but not sight.
    def __init__(self):
        Tile.__init__(self, '~', libtcod.light_blue, True, block_sight=False)

class Stair(Tile):
    # Walkable down-staircase.
    def __init__(self):
        Tile.__init__(self, '>', libtcod.white, False)

class Rat(Creature):
    # Weakest monster: 3 hp.
    def __init__(self, x, y):
        Creature.__init__(self,x, y, 'rat', 'r', libtcod.sepia, 3)

class Orc(Creature):
    # Mid-tier monster: 10 hp.
    def __init__(self, x, y):
        Creature.__init__(self,x, y, 'orc', 'o', libtcod.green, 10)

class Troll(Creature):
    # Toughest monster: 20 hp and boosted attack.
    def __init__(self, x, y):
        Creature.__init__(self,x, y, 'troll', 'T', libtcod.darker_green, 20)
        self.attack=4

class Thief(Creature):
    """A creature that does no damage but picks up items and steals from
    whatever it hits."""

    def __init__(self, x, y):
        Creature.__init__(self,x, y, 'thief', 't', libtcod.gray, 4)
        self.attack = 0

    def turn(self, map, player, bool):
        # Prefer grabbing loot on the current tile; otherwise act normally.
        # (Parameters `map` and `bool` shadow builtins; signature kept as-is
        # to match the Creature.turn interface.)
        objects_at = map.getObjectsAt(self.x, self.y)
        took_objects = False
        for obj in objects_at:
            if isinstance(obj, Item):
                self.getItem(obj)
                took_objects = True
        if not took_objects:
            super(Thief, self).turn(map, player, bool)

    def hit(self, creature):
        # On a successful hit, steal one random inventory item.
        if not super(Thief, self).hit(creature):
            return False
        if creature.inventory:
            r = libtcod.random_get_int(0, 0, len(creature.inventory)-1)
            loot = creature.inventory[r]
            self.getItem(loot)
            messenger.window.messageAt(self.x, self.y,
                self.name+' steals '+loot.name+' from '+creature.name+'.')
        return True

class HealingPotion(Item):
    # Restores 10 hp when used; consumed on use.
    def __init__(self, x, y, owner):
        Item.__init__(self, x, y, 'healing potion', '!', libtcod.purple,
                      owner)

    def use(self):
        self.owner.heal(10)
        self.owner.removeItem(self)
        messenger.window.message('You feel better.', libtcod.light_purple)

class AttackPotion(Item):
    # Permanently raises the drinker's attack by 1; consumed on use.
    def __init__(self, x, y, owner):
        Item.__init__(self, x, y, 'attack potion', '!', libtcod.red, owner)

    def use(self):
        self.owner.attack+=1
        self.owner.removeItem(self)
        messenger.window.message('You feel more powerful. Attack +1!',
                                 libtcod.red)

class DefensePotion(Item):
    # Permanently raises the drinker's defense by 1; consumed on use.
    def __init__(self, x, y, owner):
        Item.__init__(self, x, y, 'defense potion', '!', libtcod.dark_green,
                      owner)

    def use(self):
        self.owner.defense+=1
        self.owner.removeItem(self)
        messenger.window.message('You feel safer. Defense +1!',
                                 libtcod.dark_green)

class Map(MapBase):
    """Random dungeon map: rooms connected by tunnels, populated with items
    and monsters. Note: RNG call order determines the generated layout."""

    def __init__(self, width, height, difficulty):
        MapBase.__init__(self, width, height)
        # NOTE(review): difficulty is stored but not used below -- presumably
        # consumed elsewhere; confirm against proto_rogue_base.
        self.difficulty=difficulty

    def create_room(self, room):
        """Carves a rectangular room, occasionally flooded or crumbled."""
        if libtcod.random_get_int(0, 0, 4)==4:  # 1-in-5: special room
            if libtcod.random_get_int(0, 0, 1)==1:
                # Maybe make the edges flooded
                for x in range(room.x1, room.x2+1):
                    for y in range(room.y1, room.y2+1):
                        self.map[x][y] = Water()
            else:
                # Maybe make the walls crumbled
                for x in range(room.x1, room.x2+1):
                    for y in range(room.y1, room.y2+1):
                        if libtcod.random_get_int(0, 0, 2)==2:
                            self.map[x][y] = Floor()
        #go through the tiles in the rectangle and make them passable
        for x in range(room.x1 + 1, room.x2):
            for y in range(room.y1 + 1, room.y2):
                self.map[x][y] = Floor()

    def place_items(self, room):
        """Scatters 0..MAX_ROOM_ITEMS potions on unblocked room tiles."""
        #choose random number of items
        num_items = libtcod.random_get_int(0, 0, MAX_ROOM_ITEMS)

        for i in range(num_items):
            #choose random spot for this item
            x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
            y = libtcod.random_get_int(0, room.y1+1, room.y2-1)

            #only place it if the tile is not blocked
            if not self.is_blocked(x, y):
                # 0-1: healing (50%), 2: attack (25%), 3: defense (25%)
                item_type=libtcod.random_get_int(0, 0, 3)
                if item_type==0 or item_type==1:
                    #create a healing potion
                    self.getItem(HealingPotion(x, y, None))
                elif item_type==2:
                    self.getItem(AttackPotion(x, y, None))
                elif item_type==3:
                    self.getItem(DefensePotion(x, y, None))

    def place_monsters(self, room):
        """Scatters 0..MAX_ROOM_MONSTERS monsters on unblocked room tiles."""
        #choose random number of monsters
        num_monsters = libtcod.random_get_int(0, 0, MAX_ROOM_MONSTERS)

        for i in range(num_monsters):
            #choose random spot for this monster
            x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
            y = libtcod.random_get_int(0, room.y1+1, room.y2-1)

            #only place it if the tile is not blocked
            if not self.is_blocked(x, y):
                # Nested rolls: troll 1/6, else orc 1/5, else thief 1/3,
                # else rat.
                if libtcod.random_get_int(0, 0, 5)==4:
                    self.objects.append(Troll(x, y))
                else:
                    if libtcod.random_get_int(0, 0, 4)==0:
                        self.objects.append(Orc(x, y))
                    else:
                        if libtcod.random_get_int(0, 0, 2)==0:
                            self.objects.append(Thief(x, y))
                        else:
                            self.objects.append(Rat(x, y))

    def create_h_tunnel(self, x1, x2, y):
        #horizontal tunnel. min() and max() are used in case x1>x2
        for x in range(min(x1, x2), max(x1, x2) + 1):
            self.map[x][y]=Floor()

    def create_v_tunnel(self, y1, y2, x):
        #vertical tunnel
        for y in range(min(y1, y2), max(y1, y2) + 1):
            self.map[x][y]=Floor()

    def generate_map(self):
        """Fills the map with walls, then carves up to MAX_ROOMS connected
        rooms; the player starts in the first room and stairs end the last."""
        #fill map with "blocked" tiles
        self.map = [[ Wall()
                      for y in range(MAP_HEIGHT) ]
                    for x in range(MAP_WIDTH) ]
        new_x=0
        new_y=0

        rooms = []
        num_rooms = 0
        for r in range(MAX_ROOMS):
            #random width and height
            w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
            h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
            #random position without going out of the boundaries of the map
            x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
            y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)

            #"Rect" class makes rectangles easier to work with
            new_room = Rect(x, y, w, h)

            #run through the other rooms and see if they intersect with this one
            failed = False
            for other_room in rooms:
                if new_room.intersect(other_room):
                    failed = True
                    break

            if not failed:
                #this means there are no intersections, so this room is valid

                #"paint" it to the map's tiles
                self.create_room(new_room)
                self.place_items(new_room)
                self.place_monsters(new_room)

                #center coordinates of new room, will be useful later
                (new_x, new_y) = new_room.center()

                if num_rooms == 0:
                    #this is the first room, where the player starts at
                    self.startx = new_x
                    self.starty = new_y
                else:
                    #all rooms after the first:
                    #connect it to the previous room with a tunnel

                    #center coordinates of previous room
                    (prev_x, prev_y) = rooms[num_rooms-1].center()

                    #draw a coin (random number that is either 0 or 1)
                    if libtcod.random_get_int(0, 0, 1) == 1:
                        #first move horizontally, then vertically
                        self.create_h_tunnel(prev_x, new_x, prev_y)
                        self.create_v_tunnel(prev_y, new_y, new_x)
                    else:
                        #first move vertically, then horizontally
                        self.create_v_tunnel(prev_y, new_y, prev_x)
                        self.create_h_tunnel(prev_x, new_x, new_y)

                #finally, append the new room to the list
                rooms.append(new_room)
                num_rooms += 1

        #create stairs at the center of the last room
        self.map[new_x][new_y] = Stair()
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from msrest.polling import LROPoller, NoPolling from msrestazure.polling.arm_polling import ARMPolling from .. import models class VirtualMachineScaleSetsOperations(object): """VirtualMachineScaleSetsOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client Api Version. Constant value: "2016-03-30". 
""" models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2016-03-30" self.config = config def _create_or_update_initial( self, resource_group_name, name, parameters, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.create_or_update.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("name", name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(parameters, 'VirtualMachineScaleSet') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, stream=False, **operation_config) if response.status_code not in [200, 201]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineScaleSet', response) if response.status_code == 201: deserialized = self._deserialize('VirtualMachineScaleSet', response) if raw: 
client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def create_or_update( self, resource_group_name, name, parameters, custom_headers=None, raw=False, polling=True, **operation_config): """Create or update a VM scale set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param name: The name of the VM scale set to create or update. :type name: str :param parameters: The scale set object. :type parameters: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSet :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns VirtualMachineScaleSet or ClientRawResponse<VirtualMachineScaleSet> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSet] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSet]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, name=name, parameters=parameters, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('VirtualMachineScaleSet', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() 
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}'}

    # NOTE(review): this module follows the Swagger/AutoRest generated-client
    # pattern (per-method `.metadata` attributes, serializer plumbing) —
    # presumably auto-generated; confirm before hand-editing.
    def _delete_initial(
            self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, **operation_config):
        # Internal helper: issues the single DELETE request. Long-running
        # polling is driven by the public delete() wrapper below.
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # 200/202/204 are the accepted responses; anything else is surfaced
        # as a CloudError carrying the service request id.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        # Only a 200 carries an OperationStatusResponse body.
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes a VM scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         OperationStatusResponse or
         ClientRawResponse<OperationStatusResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation once; raw=True so the poller receives the
        # initial HTTP response untouched.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Final deserialization once polling completes.
            deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url':
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}'}

    def get(
            self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, **operation_config):
        """Display information about a virtual machine scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualMachineScaleSet or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSet
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # Synchronous GET: only 200 is a success here.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineScaleSet', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}'}

    def _deallocate_initial(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, **operation_config):
        # Internal helper: issues the single POST to .../deallocate; polling
        # is handled by the public deallocate() wrapper.
        # An omitted instance_ids list means "no request body" further below.
        vm_instance_ids = None
        if instance_ids is not None:
            vm_instance_ids = models.VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids)

        # Construct URL
        url = self.deallocate.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language",
                                                                         self.config.accept_language, 'str')

        # Construct body
        # No instance ids -> no body; the service then performs the
        # operation on every VM in the scale set (see docstring below).
        if vm_instance_ids is not None:
            body_content = self._serialize.body(vm_instance_ids, 'VirtualMachineScaleSetVMInstanceIDs')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        # 202 means the request was accepted and continues server-side.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def deallocate(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deallocates specific virtual machines in a VM scale set. Shuts
        down the virtual machines and releases the compute resources. You
        are not billed for the compute resources that this virtual machine
        scale set deallocates.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param instance_ids: The virtual machine scale set instance ids.
         Omitting the virtual machine scale set instance ids will result in
         the operation being performed on all virtual machines in the
         virtual machine scale set.
        :type instance_ids: list[str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         OperationStatusResponse or
         ClientRawResponse<OperationStatusResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._deallocate_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            instance_ids=instance_ids,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate'}

    def _delete_instances_initial(
            self, resource_group_name, vm_scale_set_name, instance_ids, custom_headers=None, raw=False, **operation_config):
        # Internal helper: instance ids are REQUIRED here (unlike
        # deallocate/power_off/restart), so the body is always sent.
        vm_instance_ids = \
            models.VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids)

        # Construct URL
        url = self.delete_instances.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(vm_instance_ids, 'VirtualMachineScaleSetVMInstanceRequiredIDs')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete_instances(
            self, resource_group_name, vm_scale_set_name, instance_ids, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes virtual machines in a VM scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param instance_ids: The virtual machine scale set instance ids.
        :type instance_ids: list[str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         OperationStatusResponse or
         ClientRawResponse<OperationStatusResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_instances_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            instance_ids=instance_ids,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete_instances.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete'}

    def get_instance_view(
            self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, **operation_config):
        """Gets the status of a VM scale set instance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualMachineScaleSetInstanceView or ClientRawResponse if
         raw=true
        :rtype:
         ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetInstanceView
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get_instance_view.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = \
                self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineScaleSetInstanceView', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView'}

    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of all VM scale sets under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualMachineScaleSet
        :rtype:
         ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSet]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetches one page; the Paged iterator below keeps calling this
            # with the service-provided nextLink until it is exhausted.
            # NOTE(review): this closure's `raw` parameter is never read.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # nextLink is a complete URL; no extra query parameters.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.VirtualMachineScaleSetPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.VirtualMachineScaleSetPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list.metadata = {'url':
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets'}

    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets a list of all VM Scale Sets in the subscription, regardless
        of the associated resource group. Use nextLink property in the
        response to get the next page of VM Scale Sets. Do this till
        nextLink is null to fetch all the VM Scale Sets.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualMachineScaleSet
        :rtype:
         ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSet]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # One page per call; subscription-wide URL (no resource group).
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = \
                self._client.send(
                    request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.VirtualMachineScaleSetPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.VirtualMachineScaleSetPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets'}

    def list_skus(
            self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of SKUs available for your VM scale set, including
        the minimum and maximum VM instances allowed for each SKU.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualMachineScaleSetSku
        :rtype:
         ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetSkuPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetSku]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # One page of SKUs per call; driven by the Paged iterator below.
            if not next_link:
                # Construct URL
                url = self.list_skus.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.VirtualMachineScaleSetSkuPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.VirtualMachineScaleSetSkuPaged(internal_paging, self._deserialize.dependencies,
                                                                        header_dict)
            return client_raw_response

        return deserialized
    list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus'}

    def _power_off_initial(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, **operation_config):
        # Internal helper: issues the single POST to .../poweroff; polling
        # is handled by the public power_off() wrapper.
        vm_instance_ids = None
        if instance_ids is not None:
            vm_instance_ids = models.VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids)

        # Construct URL
        url = self.power_off.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        # No instance ids -> empty body -> operation targets all VMs.
        if vm_instance_ids is not None:
            body_content = self._serialize.body(vm_instance_ids, 'VirtualMachineScaleSetVMInstanceIDs')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = \
                response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def power_off(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Power off (stop) one or more virtual machines in a VM scale set.
        Note that resources are still attached and you are getting charged
        for the resources. Instead, use deallocate to release resources and
        avoid charges.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param instance_ids: The virtual machine scale set instance ids.
         Omitting the virtual machine scale set instance ids will result in
         the operation being performed on all virtual machines in the
         virtual machine scale set.
        :type instance_ids: list[str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         OperationStatusResponse or
         ClientRawResponse<OperationStatusResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._power_off_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            instance_ids=instance_ids,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff'}

    def _restart_initial(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, **operation_config):
        # Internal helper: issues the single POST to .../restart; polling is
        # handled by the public restart() wrapper.
        vm_instance_ids = None
        if instance_ids is not None:
            vm_instance_ids = models.VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids)

        # Construct URL
        url = self.restart.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        if vm_instance_ids is not None:
            body_content = self._serialize.body(vm_instance_ids, 'VirtualMachineScaleSetVMInstanceIDs')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def restart(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Restarts one or more virtual
        machines in a VM scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set.
        :type vm_scale_set_name: str
        :param instance_ids: The virtual machine scale set instance ids.
         Omitting the virtual machine scale set instance ids will result in
         the operation being performed on all virtual machines in the
         virtual machine scale set.
        :type instance_ids: list[str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         OperationStatusResponse or
         ClientRawResponse<OperationStatusResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._restart_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            instance_ids=instance_ids,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result,
                         get_long_running_output, polling_method)
    restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart'}

    def _start_initial(
            self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, **operation_config):
        # Internal helper: issues the single POST to .../start.
        # NOTE(review): this definition is truncated at the end of the
        # visible source; it continues past this view.
        vm_instance_ids = None
        if instance_ids is not None:
            vm_instance_ids = models.VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids)

        # Construct URL
        url = self.start.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        if vm_instance_ids is not None:
            body_content = self._serialize.body(vm_instance_ids, 'VirtualMachineScaleSetVMInstanceIDs')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise
exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def start( self, resource_group_name, vm_scale_set_name, instance_ids=None, custom_headers=None, raw=False, polling=True, **operation_config): """Starts one or more virtual machines in a VM scale set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :param instance_ids: The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set. :type instance_ids: list[str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns OperationStatusResponse or ClientRawResponse<OperationStatusResponse> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._start_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, instance_ids=instance_ids, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = 
ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start'} def _update_instances_initial( self, resource_group_name, vm_scale_set_name, instance_ids, custom_headers=None, raw=False, **operation_config): vm_instance_ids = models.VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids) # Construct URL url = self.update_instances.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(vm_instance_ids, 
'VirtualMachineScaleSetVMInstanceRequiredIDs') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send( request, header_parameters, body_content, stream=False, **operation_config) if response.status_code not in [200, 202]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def update_instances( self, resource_group_name, vm_scale_set_name, instance_ids, custom_headers=None, raw=False, polling=True, **operation_config): """Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :param instance_ids: The virtual machine scale set instance ids. 
:type instance_ids: list[str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns OperationStatusResponse or ClientRawResponse<OperationStatusResponse> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._update_instances_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, instance_ids=instance_ids, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) update_instances.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade'} def _reimage_initial( self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.reimage.metadata['url'] path_format_arguments = { 
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200, 202]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def reimage( self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, polling=True, **operation_config): """Reimages (upgrade the operating system) one or more virtual machines in a VM scale set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. 
:type vm_scale_set_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns OperationStatusResponse or ClientRawResponse<OperationStatusResponse> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._reimage_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage'}
# coding: utf-8 # # Read Garmin GPX with heartrate # # In[1]: import numpy as np import matplotlib.pyplot as plt from datetime import datetime import pandas as pd from lxml import etree get_ipython().magic(u'matplotlib inline') # In[2]: fn = "activity_721671330.gpx" tree = etree.parse(fn) # In[3]: namespace = {'def': 'http://www.topografix.com/GPX/1/1', 'gpxtpx': 'http://www.garmin.com/xmlschemas/TrackPointExtension/v1', 'gpxx': 'http://www.garmin.com/xmlschemas/GpxExtensions/v3', 'xsi': 'http://www.w3.org/2001/XMLSchema-instance' } # Within `trk` tags, find `trkpt` elements get element values # In[4]: elist = tree.xpath('./def:trk//def:trkpt',namespaces=namespace) lonlat = [e.values() for e in elist] lon = np.array([float(i[0]) for i in lonlat]) lat = np.array([float(i[1]) for i in lonlat]) print lon[0],lat[0],np.shape(lon) # Within `trk` tags, find `time` elements and get element text # In[5]: elist = tree.xpath('./def:trk//def:time',namespaces=namespace) fmt = '%Y-%m-%dT%H:%M:%S.%fZ' time = [datetime.strptime(d.text, fmt) for d in elist] print time[0], np.shape(time) # Within `trk` tags, find `hr` elements and get element text. CRS changed this to return an array of floats. # In[6]: elist = tree.xpath("./def:trk//gpxtpx:hr", namespaces=namespace) hr = np.array([float(e.text) for e in elist]) print hr[0], np.shape(hr) # Make the dataframe # In[7]: df = pd.DataFrame.from_dict(dict(time=time, lon=lon, lat=lat, hr=hr)) df.set_index('time', drop=True, inplace=True) # In[8]: df.head(5) # Plot the heartrate # In[9]: df['hr'].plot(figsize=(12,4)); # Calculate speed, effort, and efficiency. I have not figured out how to do this in Pandas, or how to avoid the loop when calculating time differences. The .total_seconds() conversion does not work on np arrays of datetime.deltatime objects. 
# In[10]: latr = np.radians(lat) lonr = np.radians(lon) dlatr = np.diff(latr) dlonr = np.diff(lonr) # Haversine formula for great circle a = np.sin(dlatr/2.)**2 + np.cos(latr[0:-1]) * np.cos(latr[1:]) * np.sin(dlonr/2.)**2 c = 2. * np.arcsin(np.sqrt(a)) distm = 6367e3 * c # distm is in meters. print "distm",distm[0], np.shape(distm) # this produces an array of datetime.deltatime objects difft = np.diff(time) print "np.diff(time)",difft[5].total_seconds(), np.shape(difft) # there must be a better way: dt = np.zeros_like(difft) for i in np.arange(len(difft)): dt[i]= float(difft[i].total_seconds()) etime = np.cumsum(dt) speed = distm/dt # calculate effort as fraction of usable hr range hr_rest = 68. # resting rate hr_ana = 162. # anaerobic threshold effort = (hr-hr_rest)/(hr_ana-hr_rest) # calculate efficiency eff = speed/effort[1:] print "Effort: ",effort[0], type(effort), np.shape(effort) fig = plt.figure(figsize=(12,4)) plt.plot(etime/60.,speed,label='Speed') plt.plot(etime/60.,eff,label='Efficiency') plt.ylabel('m/s; m/s/effort') plt.xlabel('Elapsed time (minutes)') plt.legend() # Plot lon/lat with Cartopy # In[11]: import cartopy.crs as ccrs from cartopy.io.img_tiles import MapQuestOpenAerial geodetic = ccrs.Geodetic(globe=ccrs.Globe(datum='WGS84')) b=np.array([lon.min(), lat.min(), lon.max(), lat.max()]) plt.figure(figsize=(12,12)) # Open Source Imagery from MapQuest (max zoom = 16?) tiler = MapQuestOpenAerial() # Open Street Map (max zoom = 18?) 
#tiler = OSM() ax = plt.axes(projection=tiler.crs) dx=b[2]-b[0] dy=b[3]-b[1] extent = (b[0]-0.1*dx,b[2]+0.1*dx,b[1]-0.1*dy,b[3]+0.1*dy) ax.set_extent(extent, geodetic) ax.add_image(tiler, 14) plt.plot(lon[1:],lat[1:],'m-',transform=ccrs.PlateCarree()); # sheesh, this is embarassing # 1) clip lat/lon and hr to length of other stuff (should actually interpolate to centerpoint) lons = lon[1:] lats = lat[1:] # this does not work: # plt.plot(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff); # nor does this: # for i in np.arange(len(lons)): # plt.plot(lons[i],lats[i],transform=ccrs.PlateCarree(),marker='o',c=eff[i]); # ax.scatter(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff); # ax.scatter(lons,lats,marker='o',c=eff); gl=ax.gridlines(draw_labels=True) gl.xlabels_top = False gl.ylabels_right = False # In[12]: eff = speed/effort[1:] import cartopy.crs as ccrs from cartopy.io.img_tiles import MapQuestOpenAerial geodetic = ccrs.Geodetic(globe=ccrs.Globe(datum='WGS84')) b=np.array([lon.min(), lat.min(), lon.max(), lat.max()]) plt.figure(figsize=(12,12)) # Open Source Imagery from MapQuest (max zoom = 16?) tiler = MapQuestOpenAerial() # Open Street Map (max zoom = 18?) 
#tiler = OSM() ax = plt.axes(projection=tiler.crs) dx=b[2]-b[0] dy=b[3]-b[1] extent = (b[0]-0.1*dx,b[2]+0.1*dx,b[1]-0.1*dy,b[3]+0.1*dy) ax.set_extent(extent, geodetic) ax.add_image(tiler, 14) # sheesh, this is embarassing # 1) clip lat/lon and hr to length of other stuff (should actually interpolate to centerpoint) lons = lon[1:] lats = lat[1:] # this does not work: # plt.plot(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff); # nor does this: # for i in np.arange(len(lons)): # plt.plot(lons[i],lats[i],transform=ccrs.PlateCarree(),marker='o',c=eff[i]); kw = dict(alpha=0.5, lw=0 ) ax.scatter(lons,lats,transform=ccrs.PlateCarree(),marker='o',c=eff.tolist(),**kw); # ax.scatter(lons,lats,marker='o',c=eff); gl=ax.gridlines(draw_labels=True) gl.xlabels_top = False gl.ylabels_right = False # In[13]: se = (eff - eff.min()) / eff.ptp()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fnmatch
import re
from unittest import TestCase

import mock
from specchio.handlers import SpecchioEventHandler
from watchdog.events import (DirCreatedEvent, FileCreatedEvent,
                             FileDeletedEvent, FileModifiedEvent,
                             FileMovedEvent)


class SpecchioEventHandlerTest(TestCase):
    """Unit tests for :class:`SpecchioEventHandler`.

    ``init_gitignore`` is patched out during construction so the handler can
    be built without touching the filesystem; gitignore state is then injected
    directly onto the instance in :meth:`setUp`.

    FIX(review): several mock checks used ``m.called_once_with(...)`` —
    which is NOT an assertion (attribute access on a Mock silently creates a
    child mock and always "passes").  They are corrected to
    ``assert_called_once_with``.  Also renamed the misspelled
    ``test_on_modifited_ignore`` to ``test_on_modified_ignore``.
    """

    def setUp(self):
        # Build the handler with init_gitignore stubbed so no real
        # .gitignore files are read.
        with mock.patch.object(
            SpecchioEventHandler, "init_gitignore"
        ) as _init_gitignore:
            _init_gitignore.return_value = True
            self.handler = SpecchioEventHandler(
                src_path="/a/", dst_ssh="user@host", dst_path="/b/a/"
            )
            self.handler.init_gitignore = mock.Mock()
            # Hand-crafted gitignore state: priority buckets 1..3 of
            # compiled fnmatch patterns, keyed by .gitignore path.
            self.handler.gitignore_dict = {
                "/a/.gitignore": {
                    1: [],
                    2: [re.compile(fnmatch.translate("1.py"))],
                    3: [re.compile(fnmatch.translate("test.py")),
                        re.compile(fnmatch.translate("t_folder/"))]
                }
            }
            self.handler.gitignore_list = ["/a/"]

    def test_specchio_init_with_init_remote(self):
        """is_init_remote=True triggers exactly one init_remote() call."""
        with mock.patch.object(
            SpecchioEventHandler, "init_gitignore"
        ) as _init_gitignore:
            with mock.patch.object(
                SpecchioEventHandler, "init_remote"
            ) as _init_remote:
                _init_gitignore.return_value = True
                _init_remote.return_value = True
                SpecchioEventHandler(
                    src_path="/a/", dst_ssh="user@host", dst_path="/b/a/",
                    is_init_remote=True
                )
                _init_remote.assert_called_once_with()

    def test_is_ignore_git_folder(self):
        """Paths inside the .git folder are always ignored."""
        _file_or_dir_path = mock.Mock("/a/")
        _file_or_dir_path.endswith.return_value = True
        _file_or_dir_path.startswith.return_value = True
        result = self.handler.is_ignore(_file_or_dir_path, True)
        self.assertEqual(result, True)
        # FIX: was the no-op `startswith.called_once_with(...)`.
        _file_or_dir_path.startswith.assert_called_once_with(
            self.handler.git_path)

    @mock.patch("specchio.handlers.walk_get_gitignore")
    @mock.patch("specchio.handlers.get_all_re")
    def test_init_gitignore(self, _get_all_re, _walk_get_gitignore):
        """init_gitignore walks the tree and caches compiled patterns."""
        _walk_get_gitignore.return_value = ["/a/.gitignore"]
        _get_all_re.return_value = {
            "/a/.gitignore": {
                1: [],
                2: [re.compile(fnmatch.translate("1.py"))],
                3: [re.compile(fnmatch.translate("test.py"))]
            }
        }
        handler = SpecchioEventHandler(
            src_path="/a/", dst_ssh="user@host", dst_path="/b/a/"
        )
        # FIX: was the no-op `_walk_get_gitignore.called_once_with(...)`.
        _walk_get_gitignore.assert_called_once_with("/a/")
        self.assertEqual(handler.gitignore_list, ["/a/"])
        self.assertEqual(handler.gitignore_dict, {
            "/a/.gitignore": {
                1: [],
                2: [re.compile(fnmatch.translate("1.py"))],
                3: [re.compile(fnmatch.translate("test.py"))]
            }
        })

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_create_folder")
    def test_on_created_folder(self, _remote_create_folder, _os):
        """A new directory is mirrored with remote_create_folder."""
        _remote_create_folder.return_value = True
        _os.path.abspath.return_value = "/a/test1.py"
        _os.path.join.return_value = "/b/a/test1.py"
        _event = DirCreatedEvent(src_path="/a/test1.py")
        self.handler.on_created(_event)
        _remote_create_folder.assert_called_once_with(
            dst_ssh=self.handler.dst_ssh, dst_path="/b/a/test1.py"
        )

    @mock.patch("specchio.handlers.remote_create_folder")
    @mock.patch("specchio.handlers.rsync")
    @mock.patch("specchio.handlers.os")
    def test_on_created_file(self, _os, _rsync, _remote_create_folder):
        """A new .gitignore is rsynced and re-parsed via update_gitignore."""
        with mock.patch.object(
            self.handler, "update_gitignore"
        ) as _update_gitignore:
            _os.path.abspath.return_value = "/a/.gitignore"
            _rsync.return_value = True
            _update_gitignore.return_value = True
            _remote_create_folder.return_value = True
            _os.path.join.return_value = "/b/a/.gitignore"
            _event = FileCreatedEvent(src_path="/a/.gitignore")
            self.handler.on_created(_event)
            _remote_create_folder.assert_called_once_with(
                dst_ssh=self.handler.dst_ssh, dst_path="/b/a/"
            )
            _rsync.assert_called_once_with(dst_ssh=self.handler.dst_ssh,
                                           dst_path="/b/a/.gitignore",
                                           src_path="/a/.gitignore")
            _update_gitignore.assert_called_once_with("/a/.gitignore")

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_create_folder")
    def test_on_created_ignore(self, _remote_create_folder, _os):
        """Creation of an ignored file triggers no remote action."""
        _remote_create_folder.return_value = True
        _os.path.abspath.return_value = "/a/test.py"
        _event = FileCreatedEvent(src_path="/a/test.py")
        self.handler.on_created(_event)
        self.assertEqual(_remote_create_folder.call_count, 0)

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_create_folder")
    @mock.patch("specchio.handlers.rsync")
    def test_on_modified(self, _rsync, _remote_create_folder, _os):
        """Modifying a .gitignore rsyncs it and refreshes the pattern cache."""
        with mock.patch.object(
            self.handler, "update_gitignore"
        ) as _update_gitignore:
            _rsync.return_value = True
            _remote_create_folder.return_value = True
            _os.path.abspath.return_value = "/a/.gitignore"
            _os.path.join.return_value = "/b/a/.gitignore"
            _update_gitignore.return_value = True
            _event = FileModifiedEvent(src_path="/a/.gitignore")
            self.handler.on_modified(_event)
            _rsync.assert_called_once_with(
                dst_ssh=self.handler.dst_ssh,
                src_path="/a/.gitignore",
                dst_path="/b/a/.gitignore"
            )
            _remote_create_folder.assert_called_once_with(
                dst_ssh=self.handler.dst_ssh, dst_path="/b/a/"
            )
            _update_gitignore.assert_called_once_with(
                "/a/.gitignore"
            )

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_create_folder")
    @mock.patch("specchio.handlers.rsync")
    def test_on_modified_ignore(self, _rsync, _remote_create_folder, _os):
        """Modification of an ignored file triggers no remote action."""
        # FIX: renamed from the misspelled `test_on_modifited_ignore`.
        _rsync.return_value = True
        _remote_create_folder.return_value = True
        _os.path.abspath.return_value = "/a/test.py"
        _event = FileModifiedEvent(src_path="/a/test.py")
        self.handler.on_modified(_event)
        self.assertEqual(_remote_create_folder.call_count, 0)
        self.assertEqual(_rsync.call_count, 0)

    @mock.patch("specchio.handlers.get_all_re")
    def test_update_gitignore(self, _get_all_re):
        """update_gitignore merges a new .gitignore into list and dict."""
        _get_all_re.return_value = {
            "/a/b/.gitignore": {
                1: [],
                2: [],
                3: []
            }
        }
        # Snapshot state so it can be restored after the assertions.
        _handler_gitignore_list = list(self.handler.gitignore_list)
        _handler_gitignore_dict = dict(self.handler.gitignore_dict)
        self.handler.update_gitignore("/a/b/.gitignore")
        self.assertEqual(
            self.handler.gitignore_list, ["/a/b/", "/a/"]
        )
        self.assertEqual(
            self.handler.gitignore_dict,
            {
                "/a/.gitignore": {
                    1: [],
                    2: [re.compile(fnmatch.translate("1.py"))],
                    3: [re.compile(fnmatch.translate("test.py")),
                        re.compile(fnmatch.translate("t_folder/"))]
                },
                "/a/b/.gitignore": {
                    1: [],
                    2: [],
                    3: []
                }
            }
        )
        self.handler.gitignore_list = _handler_gitignore_list
        self.handler.gitignore_dict = _handler_gitignore_dict

    def test_del_gitignore(self):
        """del_gitignore removes the entry from both list and dict."""
        _handler_gitignore_list = list(self.handler.gitignore_list)
        _handler_gitignore_dict = dict(self.handler.gitignore_dict)
        self.handler.del_gitignore("/a/.gitignore")
        self.assertEqual(
            self.handler.gitignore_list, []
        )
        self.assertEqual(
            self.handler.gitignore_dict, {}
        )
        self.handler.gitignore_list = _handler_gitignore_list
        self.handler.gitignore_dict = _handler_gitignore_dict

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_rm")
    def test_on_deleted(self, _remote_rm, _os):
        """Deleting a .gitignore removes it remotely and drops its patterns."""
        with mock.patch.object(
            self.handler, "del_gitignore"
        ) as _del_gitignore:
            _os.path.abspath.return_value = "/a/.gitignore"
            _os.path.join.return_value = "/b/a/.gitignore"
            _remote_rm.return_value = True
            _del_gitignore.return_value = True
            _event = FileDeletedEvent(src_path="/a/.gitignore")
            self.handler.on_deleted(_event)
            _os.path.abspath.assert_called_once_with("/a/.gitignore")
            _os.path.join.assert_called_once_with(
                "/b/a/", ".gitignore"
            )
            _remote_rm.assert_called_once_with(
                dst_ssh=self.handler.dst_ssh, dst_path="/b/a/.gitignore"
            )
            _del_gitignore.assert_called_once_with("/a/.gitignore")

    @mock.patch("specchio.handlers.os")
    def test_on_delete_ignore(self, _os):
        """Deletion of an ignored file stops before any path joining."""
        _os.path.abspath.return_value = "/a/test.py"
        _event = FileDeletedEvent(src_path="/a/test.py")
        self.handler.on_deleted(_event)
        _os.path.abspath.assert_called_once_with(
            "/a/test.py"
        )
        self.assertEqual(_os.path.join.call_count, 0)

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_mv")
    def test_on_moved(self, _mv, _os):
        """Moving a tracked file maps to a remote mv of the mirrored paths."""
        _mv.return_value = True
        _os.path.abspath.side_effect = ["/a/1.py", "/a/2.py"]
        _os.path.join.side_effect = ["/b/a/1.py", "/b/a/2.py"]
        _event = FileMovedEvent(src_path="/a/1.py", dest_path="/a/2.py")
        self.handler.on_moved(_event)
        _mv.assert_called_once_with(
            dst_ssh=self.handler.dst_ssh,
            src_path="/b/a/1.py",
            dst_path="/b/a/2.py"
        )

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_rm")
    @mock.patch("specchio.handlers.remote_create_folder")
    @mock.patch("specchio.handlers.remote_mv")
    @mock.patch("specchio.handlers.rsync")
    def test_on_moved_all_ignore(self, _rsync, _mv, _create_folder, _rm, _os):
        """Move where both ends are ignored triggers no remote action."""
        _mv.return_value = True
        _create_folder.return_value = True
        _rm.return_value = True
        _rsync.return_value = True
        _os.path.abspath.side_effect = ["/a/test.py", "/a/test.py"]
        _os.path.join.side_effect = ["/b/a/test.py", "/b/a/test.py"]
        _event = FileMovedEvent(src_path="/a/test.py", dest_path="/a/test.py")
        self.handler.on_moved(_event)
        self.assertEqual(_mv.call_count, 0)
        self.assertEqual(_rm.call_count, 0)
        self.assertEqual(_create_folder.call_count, 0)
        self.assertEqual(_rsync.call_count, 0)

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_create_folder")
    @mock.patch("specchio.handlers.rsync")
    def test_on_moved_src_ignore(self, _rsync, _create_folder, _os):
        """Ignored source, tracked destination => rsync the destination."""
        _create_folder.return_value = True
        _rsync.return_value = True
        _os.path.abspath.side_effect = ["/a/test.py", "/a/1.py"]
        _os.path.join.side_effect = ["/b/a/test.py", "/b/a/1.py"]
        _event = FileMovedEvent(src_path="test.py", dest_path="/a/1.py")
        self.handler.on_moved(_event)
        _create_folder.assert_called_once_with(
            dst_ssh=self.handler.dst_ssh, dst_path="/b/a/"
        )
        _rsync.assert_called_once_with(
            dst_ssh=self.handler.dst_ssh,
            src_path="/a/1.py",
            dst_path="/b/a/1.py"
        )

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.remote_rm")
    def test_on_moved_dst_ignore(self, _rm, _os):
        """Tracked source, ignored destination => remove the remote copy."""
        _rm.return_value = True
        _os.path.abspath.side_effect = ["/a/1.py", "/a/test.py"]
        _os.path.join.side_effect = ["/b/a/1.py", "/b/a/test.py"]
        _event = FileMovedEvent(src_path="1.py", dest_path="/a/test.py")
        self.handler.on_moved(_event)
        _rm.assert_called_once_with(
            dst_ssh=self.handler.dst_ssh, dst_path="/b/a/1.py"
        )

    @mock.patch("specchio.handlers.os")
    @mock.patch("specchio.handlers.rsync_multi")
    def test_init_remote(self, _rsync_multi, _os):
        """init_remote walks the source tree and rsyncs non-ignored files."""
        _os.walk.return_value = [
            ["/a/", [], ["1.py", "2.py"]],
            ["/a/t_folder/", [], []]
        ]
        _os.path.abspath.side_effect = [
            "/a", "/a/1.py", "/a/2.py", "/a/t_folder"
        ]
        _os.path.join.side_effect = [
            "/a/1.py", "/a/2.py"
        ]
        _rsync_multi.return_value = True
        with mock.patch.object(self.handler, "is_ignore") as _is_ignore:
            # 1.py ignored, 2.py kept, t_folder ignored.
            _is_ignore.side_effect = [False, True, False, True]
            self.handler.init_remote()
            _rsync_multi.assert_called_once_with(
                dst_ssh=self.handler.dst_ssh,
                folder_path=self.handler.src_path,
                src_paths=["2.py"],
                dst_path=self.handler.dst_path
            )
from jsonrpc import ServiceProxy import sys import string # ===== BEGIN USER SETTINGS ===== # if you do not set these you will be prompted for a password for every command rpcuser = "" rpcpass = "" # ====== END USER SETTINGS ====== if rpcpass == "": access = ServiceProxy("http://127.0.0.1:18410") else: access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:18410") cmd = sys.argv[1].lower() if cmd == "backupwallet": try: path = raw_input("Enter destination path/filename: ") print access.backupwallet(path) except: print "\n---An error occurred---\n" elif cmd == "getaccount": try: addr = raw_input("Enter a Oxcoin address: ") print access.getaccount(addr) except: print "\n---An error occurred---\n" elif cmd == "getaccountaddress": try: acct = raw_input("Enter an account name: ") print access.getaccountaddress(acct) except: print "\n---An error occurred---\n" elif cmd == "getaddressesbyaccount": try: acct = raw_input("Enter an account name: ") print access.getaddressesbyaccount(acct) except: print "\n---An error occurred---\n" elif cmd == "getbalance": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getbalance(acct, mc) except: print access.getbalance() except: print "\n---An error occurred---\n" elif cmd == "getblockbycount": try: height = raw_input("Height: ") print access.getblockbycount(height) except: print "\n---An error occurred---\n" elif cmd == "getblockcount": try: print access.getblockcount() except: print "\n---An error occurred---\n" elif cmd == "getblocknumber": try: print access.getblocknumber() except: print "\n---An error occurred---\n" elif cmd == "getconnectioncount": try: print access.getconnectioncount() except: print "\n---An error occurred---\n" elif cmd == "getdifficulty": try: print access.getdifficulty() except: print "\n---An error occurred---\n" elif cmd == "getgenerate": try: print access.getgenerate() except: print "\n---An error occurred---\n" elif cmd 
== "gethashespersec": try: print access.gethashespersec() except: print "\n---An error occurred---\n" elif cmd == "getinfo": try: print access.getinfo() except: print "\n---An error occurred---\n" elif cmd == "getnewaddress": try: acct = raw_input("Enter an account name: ") try: print access.getnewaddress(acct) except: print access.getnewaddress() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaccount": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaccount(acct, mc) except: print access.getreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaddress": try: addr = raw_input("Enter a Oxcoin address (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaddress(addr, mc) except: print access.getreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "gettransaction": try: txid = raw_input("Enter a transaction ID: ") print access.gettransaction(txid) except: print "\n---An error occurred---\n" elif cmd == "getwork": try: data = raw_input("Data (optional): ") try: print access.gettransaction(data) except: print access.gettransaction() except: print "\n---An error occurred---\n" elif cmd == "help": try: cmd = raw_input("Command (optional): ") try: print access.help(cmd) except: print access.help() except: print "\n---An error occurred---\n" elif cmd == "listaccounts": try: mc = raw_input("Minimum confirmations (optional): ") try: print access.listaccounts(mc) except: print access.listaccounts() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaccount": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? 
(true/false, optional): ") try: print access.listreceivedbyaccount(mc, incemp) except: print access.listreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaddress": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaddress(mc, incemp) except: print access.listreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "listtransactions": try: acct = raw_input("Account (optional): ") count = raw_input("Number of transactions (optional): ") frm = raw_input("Skip (optional):") try: print access.listtransactions(acct, count, frm) except: print access.listtransactions() except: print "\n---An error occurred---\n" elif cmd == "move": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.move(frm, to, amt, mc, comment) except: print access.move(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendfrom": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendfrom(frm, to, amt, mc, comment, commentto) except: print access.sendfrom(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendmany": try: frm = raw_input("From: ") to = raw_input("To (in format address1:amount1,address2:amount2,...): ") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.sendmany(frm,to,mc,comment) except: print access.sendmany(frm,to) except: print "\n---An error occurred---\n" elif cmd == "sendtoaddress": try: to = raw_input("To (in format address1:amount1,address2:amount2,...): ") amt = raw_input("Amount:") 
comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendtoaddress(to,amt,comment,commentto) except: print access.sendtoaddress(to,amt) except: print "\n---An error occurred---\n" elif cmd == "setaccount": try: addr = raw_input("Address: ") acct = raw_input("Account:") print access.setaccount(addr,acct) except: print "\n---An error occurred---\n" elif cmd == "setgenerate": try: gen= raw_input("Generate? (true/false): ") cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") try: print access.setgenerate(gen, cpus) except: print access.setgenerate(gen) except: print "\n---An error occurred---\n" elif cmd == "settxfee": try: amt = raw_input("Amount:") print access.settxfee(amt) except: print "\n---An error occurred---\n" elif cmd == "stop": try: print access.stop() except: print "\n---An error occurred---\n" elif cmd == "validateaddress": try: addr = raw_input("Address: ") print access.validateaddress(addr) except: print "\n---An error occurred---\n" elif cmd == "walletpassphrase": try: pwd = raw_input("Enter wallet passphrase: ") access.walletpassphrase(pwd, 60) print "\n---Wallet unlocked---\n" except: print "\n---An error occurred---\n" elif cmd == "walletpassphrasechange": try: pwd = raw_input("Enter old wallet passphrase: ") pwd2 = raw_input("Enter new wallet passphrase: ") access.walletpassphrasechange(pwd, pwd2) print print "\n---Passphrase changed---\n" except: print print "\n---An error occurred---\n" print else: print "Command not found or not supported"
# -*- coding: utf-8 -*-
"""Testing module provides unittest by pre-defined testing syntax.

You can define testing syntax as method `docstring`,
**`.. test::` indicates the beginning of testing syntaxes.**

Run Test:

.. testcode::

    $ dp4p test --path=./example

Available syntax:

* **expect(criteria)**
* **expect(priority, rules)**
* **!expect(criteria)**
* **!expect(priority, rules)**

.. testcode::

    rules:
        controller:
            code, text, json,
            args(arguments separated by /), params(query string, form values)
        model, helper:
            int, long, bool, str, json,
            args (arguments by list), kwargs (arguments by dict)

.. testcode::

    from dp_tornado.engine.controller import Controller as dpController
    from dp_tornado.engine.model import Model as dpModel
    from dp_tornado.engine.helper import Helper as dpHelper

    class FooController(dpController):
        def get(self):
            \"""
            .. test::
                expect(
                    1,
                    code=200,
                    text='foo==bar',
                    params={'foo': 'bar'})
            \"""
            foo = self.get_argument('foo')
            if foo == 'bar':
                return self.finish('foo==bar')
            self.finish('done')

        def post(self):
            \"""
            .. test::
                expect(code=400, params={'foo': 'bar'})
            \"""
            foo = self.get_argument('foo')
            if foo == 'bar':
                return self.finish_with_error(400)
            self.finish('done')

    class FooController(dpController):
        def get(self):
            \"""
            Test with params and expect json value.

            .. test::
                expect(json="{'foo':'bar'}", params={'foo': 'bar'})
            \"""
            foo = self.get_argument('foo')
            return self.finish({'foo': foo})

    class FooController(dpController):
        def get(self):
            \"""
            Test with params and expect json value.

            .. test::
                expect(json={'foo':'bar'}, params={'foo': 'bar'})
            \"""
            foo = self.get_argument('foo')
            return self.finish({'foo': foo})

    class FooModel(dpModel):
        def foobar(self, foo):
            \"""
            Test with kwargs arguments.

            .. test::
                expect(int=100, kwargs={'foo': 'bar'})
            \"""
            if foo == 'bar':
                return int(100)
            return 0

    class FooModel(dpModel):
        def foobar(self, foo):
            \"""
            Test with kwargs arguments.

            .. test::
                expect(long=100, kwargs={'foo': 'bar'})
                expect(long=0, kwargs={'foo': 'foo'})
                !expect(long=0, kwargs={'foo': 'bar'})
            \"""
            if foo == 'bar':
                if self.e.helper.misc.system.py_version <= 2:
                    return long(100)
                else:
                    return int(100)
            if self.e.helper.misc.system.py_version <= 2:
                return long(0)
            else:
                return int(0)

    class FooHelper(dpHelper):
        def foobar(self, foo, bar):
            \"""
            Test with args arguments.

            .. test::
                expect(bool=True, args={'foo': 'bar'})
                !expect(bool=False, args={'foo': 'bar'})
            \"""
            foo = self.get_argument('foo')
            if foo == 'bar':
                return True
            return False
"""

import os
import sys
import time
import subprocess

from .engine import Engine as dpEngine


class Testing(dpEngine):
    """Collects ``.. test::`` cases from an app's controllers/models/helpers,
    launches the app in a subprocess, and runs the collected cases.

    Each collected case is a 7-tuple:
        (class, method_name, positive_expectation, path_parts,
         criteria_kwargs, module_kind, priority)
    Indexes into this tuple (p[0] .. p[6]) are used throughout the class.
    """

    def __init__(self, app_module, path, doctest=True):
        # app_module: dotted package prefix for imports (may be falsy).
        # path: filesystem root of the app under test.
        # doctest: when False, files are imported but no cases are collected.
        self.app_module = app_module
        self.app_identifier = 'dp-tornado-testing-9x48923'  # marker used to find/kill the server process
        self.app_port = 48923
        self.path = path
        self.tests = {}
        self.modules = {}  # cache of instantiated classes, see _class()
        self.doctest = doctest

        for e in ('controller', 'model', 'helper'):
            self.tests[e] = []

    @property
    def dev_null(self):
        # NOTE(review): opens a fresh file handle on every access and never
        # closes it — handles live until process exit.
        return open(os.devnull, 'w')

    def server_start(self, disable_logging=True):
        """Launch the app under test via `dp4p run` (killing any leftover
        instance first). Output is discarded unless disable_logging=False."""
        self.server_stop()

        kwargs = {}

        if disable_logging:
            kwargs = {
                'stdout': self.dev_null,
                'stderr': subprocess.STDOUT
            }

        subprocess.Popen(
            ['dp4p', 'run', '--path', self.path,
             '--port', str(self.app_port),
             '--identifier', self.app_identifier], **kwargs)

    def server_stop(self):
        """Kill every process whose command line contains the app identifier."""
        pids = subprocess.Popen(['pgrep', '-f', self.app_identifier],
                                stdout=subprocess.PIPE)
        pids = pids.stdout.readlines()
        # pgrep output is bytes on py3, str on py2; strip the trailing newline.
        pids = [(e.decode('utf8') if sys.version_info[0] >= 3 else e)
                .replace('\n', '') for e in (pids if pids else [])]

        for pid in pids:
            subprocess.Popen(
                ['kill', '-9', pid],
                stdout=self.dev_null, stderr=subprocess.STDOUT)

    def traverse(self):
        """Collect test cases from all three module kinds; False on error."""
        # Add app. path
        sys.path.append(self.path)

        for module in ('controller', 'model', 'helper'):
            if not self._traverse(module,
                                  self.helper.io.path.join(self.path, module)):
                return False
            # Sort collected cases by priority (tuple index 6).
            self.tests[module].sort(key=lambda e: e[6])

        return True

    def _traverse(self, module, path):
        """Import every .py under `path` and collect its `.. test::` cases."""
        for e in self.helper.io.path.browse(path, fullpath=False,
                                            recursive=True,
                                            conditions={'ext': 'py'}):
            # NOTE: `path` is deliberately rebound here to the module's
            # path-parts list returned by import_class.
            path, cls = self.import_class(module, e)

            if cls is False:
                return False

            if not self.doctest:
                continue

            priority = 1000000  # default base so un-prioritized cases sort last

            for m in dir(cls):
                attr = getattr(cls, m)
                docstring = attr.__doc__

                if docstring and docstring.find('.. test::') != -1:
                    # `expect` is eval'ed below; it closes over cls/m/path.
                    # noinspection PyUnusedLocal
                    def expect(alt, prio, *args, **kwargs):
                        # Priority
                        if args and len(args) == 1:
                            prio = args[0]

                        self.tests[module].append(
                            (cls, m, alt, path, kwargs, module, prio))

                    docstring = docstring[
                        docstring.find('.. test::') + len('.. test::'):]
                    docstring = '\n'.join(
                        [e for e in docstring.split('\n') if e.strip()]).strip()

                    # Consume one expect(...)/!expect(...) statement per pass.
                    while True:
                        if docstring.find('expect(') != 0 and \
                                docstring.find('!expect(') != 0:
                            break

                        next_stmt = docstring[2:].find('expect(')
                        stmt = docstring[:next_stmt].strip() \
                            if next_stmt != -1 else docstring
                        docstring = docstring[len(stmt):].strip()
                        priority += 1

                        # Rewrite into a call of the local expect():
                        #   expect(...)  -> expect(True, <prio>, ...)
                        #   !expect(...) -> expect(False, <prio>, ...)
                        stmt = stmt.replace('expect(',
                                            'expect(True, %s, ' % priority)
                        stmt = stmt.replace('!expect(True,', 'expect(False,')

                        # noinspection PyBroadException
                        try:
                            eval(stmt)
                        except Exception:
                            self.logging.info(
                                '* Test case parsing error, %s' % stmt)
                            return False

        return True

    def import_class(self, mod, path):
        """Import the class for a file path; returns (path_parts, cls).

        cls is None for skippable entries, False on import/load failure.
        """
        path = path.split('.')[0].split('/')

        if path[-1] == '__init__':
            path.pop()

        if mod in ('model', 'helper') and not path:
            return path, None

        # e.g. foo_bar.py + 'controller' -> FooBarController
        cls_name = ''.join(
            [e.capitalize()
             for e in (path[-1] if path else 'starter').split('_')] +
            [mod.capitalize()])
        module = '.'.join(
            ([self.app_module, mod] if self.app_module else [mod]) + path)

        # noinspection PyBroadException
        try:
            __import__(module)
            app = sys.modules[module] if module in sys.modules else None
            cls = getattr(app, cls_name, None)

            if not cls:
                self.logging.info('* Class load error, %s' % module)
                return path, False

            return path, cls
        except Exception as e:
            self.logging.info('* File import error, %s' % module)
            return path, False

    def run(self):
        """Wait for the server, then run all collected cases in order."""
        self.logging.set_level('requests', self.logging.level.CRITICAL)

        server_executed = False

        # Poll the /dp/identifier endpoint until the app answers with our
        # identifier (up to ~20 attempts).
        for i in range(2 * 10):
            time.sleep(0.2)
            code, res = self.helper.web.http.get.text(
                'http://127.0.0.1:%s/dp/identifier' % self.app_port)

            if res == self.app_identifier:
                server_executed = True
                break

            if i >= 5:
                self.logging.info('* Waiting server ..')
                time.sleep(0.3)

        if not server_executed:
            self.logging.info('* Server execution failed.')
            return exit(1)

        self.logging.info('*')

        session = True  # HTTP session is threaded through controller tests

        for e in self.tests['controller']:
            session, asserted = self._test_request(e, session)

            if not asserted:
                return False

        self.logging.info('*')

        for e in self.tests['model']:
            if not self._test_value(e):
                return False

        self.logging.info('*')

        for e in self.tests['helper']:
            if not self._test_value(e):
                return False

        self.logging.info('*')

        return True

    def _test_request(self, p, session):
        """Run one controller case `p` via HTTP; returns (session, passed)."""
        url_path = '/'.join(p[3])
        path = '%s.%s.%s' % (p[5], '.'.join(p[3]), p[1])
        method = p[1]  # HTTP verb == controller method name
        req = {}
        res_type = None

        if 'args' in p[4] and p[4]['args']:
            url_path = '%s/%s' % (url_path,
                                  '/'.join(str(e) for e in p[4]['args']))

        url = 'http://127.0.0.1:%s/%s' % (self.app_port, url_path)

        if 'params' in p[4] and p[4]['params']:
            req['data'] = p[4]['params']

        # Response type priority: text/json override the raw default.
        if 'code' in p[4]:
            res_type = 'raw'
        if 'text' in p[4]:
            res_type = 'text'
        elif 'json' in p[4]:
            res_type = 'json'

        if method not in ('get', 'post', 'delete', 'patch', 'put',
                          'head', 'options') or not res_type:
            self.logging.info(
                '* Method test, %s -> (%s) [INVALID]' % (path, req or '-'))
            return session, False

        session, code, res = self.helper.web.http.request(
            req_type=method,
            res_type=res_type,
            url=url,
            session=session or True,
            **req)

        asserted_code = None
        asserted_text = None
        asserted_json = None

        # Assertion, code
        if 'code' in p[4]:
            asserted_code = True if p[4]['code'] == code else False

        # Assertion, text
        if 'text' in p[4]:
            if res_type != 'text':
                res = str(res)
            asserted_text = True if p[4]['text'] == res else False

        # Assertion, json (normalize both sides via stringify+parse)
        if 'json' in p[4]:
            res_a = res if self.helper.misc.type.check.string(res) \
                else self.helper.serialization.json.stringify(res)
            res_a = self.helper.serialization.json.parse(res_a)
            res_b = self.helper.serialization.json.stringify(p[4]['json'])
            res_b = self.helper.serialization.json.parse(res_b)
            asserted_json = True if res_a == res_b else False

        # `asserted` starts as "any mismatch"; p[2] (positive expectation)
        # flips it so True always means "case passed".
        asserted = asserted_code is False or asserted_text is False \
            or asserted_json is False
        if p[2]:
            asserted = not asserted

        if not asserted:
            desc = []
            if asserted_code is not None:
                desc.append('[CODE %s / %s]' % (p[4]['code'], code))
            if asserted_text is not None:
                desc.append('[TEXT "%s" / "%s"]' % (p[4]['text'], res))
            if asserted_json is not None:
                # noinspection PyUnboundLocalVariable
                res_a = self.helper.serialization.json.stringify(res_a)
                # noinspection PyUnboundLocalVariable
                res_b = self.helper.serialization.json.stringify(res_b)
                res_a = '%s..' % res_a[0:7] if len(res_a) > 7 else res_a
                res_b = '%s..' % res_b[0:7] if len(res_b) > 7 else res_b
                desc.append('[JSON %s / %s]' % (res_a, res_b))

            desc = ' & '.join(desc)

            self.logging.info(
                '* Request test, [%s] %s => %s %s [FAIL]'
                % (method.upper(), url_path, '' if p[2] else '!', desc))
            return session, False

        desc = []
        if asserted_code is not None:
            desc.append('CODE')
        if asserted_text is not None:
            desc.append('TEXT')
        if asserted_json is not None:
            desc.append('JSON')

        self.logging.info(
            '* Request test, [%s] %s -> %s [OK]'
            % (method.upper(), url_path, ' & '.join(desc)))
        return session, True

    def _test_request_assertion(self, payload, result):
        pass

    def _test_value(self, p):
        """Run one model/helper case `p` by direct method call."""
        cls = self._class(p[0])
        method = getattr(cls, p[1])
        path = '%s.%s.%s' % (p[5], '.'.join(p[3]), p[1])
        req = None

        # noinspection PyBroadException
        try:
            if 'kwargs' in p[4] and p[4]['kwargs']:
                req = p[4]['kwargs']
                got = method(**req)
            elif 'args' in p[4] and p[4]['args']:
                req = p[4]['args']
                got = method(*req)
            else:
                got = method()
        except:
            self.logging.info(
                '* Method execution error, %s.%s' % (p[0], p[1]))
            return False

        res, exp = self._test_value_assertion(p, got)

        if not res:
            self.logging.info(
                '* Method test, %s -> (%s) -> %s%s -> %s [FAIL]'
                % (path, req, '' if p[2] else '! ', got, exp))
            return False

        # Truncate long values for the log line.
        got = str(got)
        req = str(req)
        got = '%s..' % got[0:7] if len(got) > 7 else got
        req = '%s..' % req[0:7] if len(req) > 7 else req

        self.logging.info(
            '* Method test, %s -> (%s) -> %s%s [OK]'
            % (path, req, '' if p[2] else '! ', got))

        return True

    def _test_value_assertion(self, payload, result):
        """Compare `result` against the typed expectations in payload[4].

        Returns (passed, offending_expected_value_or_None).
        """
        expected = {}

        for k in ('int', 'long', 'bool', 'str', 'json'):
            if k in payload[4]:
                expected[k] = payload[4][k]

        for k, v in expected.items():
            vo = v

            if k == 'json':
                # Normalize both sides through stringify+parse before compare.
                vo = self.helper.serialization.json.stringify(vo)
                vo = self.helper.serialization.json.parse(vo)

                result = self.helper.serialization.json.stringify(result)
                result = self.helper.serialization.json.parse(result)

            if payload[2]:
                if vo != result:
                    return False, v
            else:
                if vo == result:
                    return False, v

        return True, None

    def _class(self, cls):
        """Instantiate `cls` once and cache the instance by its str() key."""
        if str(cls) in self.modules:
            return self.modules[str(cls)]

        self.modules[str(cls)] = cls()
        return self.modules[str(cls)]
# Top-level YAML API (Python 2 style imports). NOTE: in this fork `load`/
# `load_all` resolve only basic tags by default and the unrestricted
# variants are explicitly named `danger_*`.
from error import *
from tokens import *
from events import *
from nodes import *
from loader import *
from dumper import *

__version__ = '3.12'
# Prefer the libyaml-backed C implementations when available.
try:
    from cyaml import *
    __with_libyaml__ = True
except ImportError:
    __with_libyaml__ = False

def scan(stream, Loader=Loader):
    """
    Scan a YAML stream and produce scanning tokens.
    """
    loader = Loader(stream)
    try:
        while loader.check_token():
            yield loader.get_token()
    finally:
        loader.dispose()

def parse(stream, Loader=Loader):
    """
    Parse a YAML stream and produce parsing events.
    """
    loader = Loader(stream)
    try:
        while loader.check_event():
            yield loader.get_event()
    finally:
        loader.dispose()

def compose(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        loader.dispose()

def compose_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader.get_node()
    finally:
        loader.dispose()

def load(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    By default resolve only basic YAML tags, if an
    alternate Loader is provided, may be dangerous.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()

# In this API the default Loader is already safe, so safe_load is an alias.
safe_load = load

def load_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    By default resolve only basic YAML tags, if an
    alternate Loader is provided, may be dangerous.
    """
    loader = Loader(stream)
    try:
        while loader.check_data():
            yield loader.get_data()
    finally:
        loader.dispose()

safe_load_all = load_all

def danger_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    When used on untrusted input, can result in arbitrary code execution.
    """
    return load(stream, DangerLoader)

def danger_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    When used on untrusted input, can result in arbitrary code execution.
    """
    return load_all(stream, DangerLoader)

def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # Python 2 StringIO; only used for the return-a-string mode.
        from StringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            dumper.emit(event)
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # cStringIO cannot hold unicode, so use StringIO when no encoding.
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for node in nodes:
            dumper.serialize(node)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)

def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=None,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

safe_dump_all = dump_all

def danger_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream, Dumper=DangerDumper, **kwds)

def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)

safe_dump = dump

def danger_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=DangerDumper, **kwds)

def add_implicit_resolver(tag, regexp, first=None,
        Loader=Loader, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    # Registers on the Loader/Dumper classes themselves (global effect).
    Loader.add_implicit_resolver(tag, regexp, first)
    Dumper.add_implicit_resolver(tag, regexp, first)

def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)

def add_constructor(tag, constructor, Loader=Loader):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding
    Python object.
    """
    Loader.add_constructor(tag, constructor)

def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    Loader.add_multi_constructor(tag_prefix, multi_constructor)

def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)

def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)

class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Auto-register subclasses that declare a yaml_tag.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)

class YAMLObject(object):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.
    """

    __metaclass__ = YAMLObjectMetaclass
    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    yaml_loader = Loader
    yaml_dumper = Dumper

    yaml_tag = None
    yaml_flow_style = None

    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)
    from_yaml = classmethod(from_yaml)

    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
    to_yaml = classmethod(to_yaml)
"""Trace treadmill application events.
"""

import functools
import logging

import urllib.request
import urllib.parse
import urllib.error

import click

from treadmill import context
from treadmill import cli
from treadmill import restclient
from treadmill.websocket import client as wsc

from treadmill.apptrace import events

_LOGGER = logging.getLogger(__name__)


def _find_endpoints(pattern, proto, endpoint, api=None):
    """Return all the matching endpoints in the cell.

    The return value is a dict with host-endpoint assigments as key-value
    pairs.
    """
    apis = context.GLOBAL.state_api(api)

    url = '/endpoint/{}/{}/{}'.format(pattern, proto, endpoint)

    response = restclient.get(apis, url)
    endpoints = response.json()
    if not endpoints:
        cli.bad_exit("Nodeinfo API couldn't be found")

    return endpoints


def _filter_by_uniq(in_=None, out_=None, uniq=None):
    """Only keep the events that belong to the 'uniq' in params.

    Websocket callback: always returns True so the loop keeps consuming;
    matching events are accumulated in the `out_` list.
    """
    event = events.AppTraceEvent.from_dict(in_['event'])
    if event is None:
        return True

    if uniq is not None and getattr(event, 'uniqueid', None) != uniq:
        return True

    out_.append(event)
    return True


def _get_instance_trace(instance, uniq, ws_api=None):
    """Get the history of the given instance/uniq."""
    rv = []
    message = {'topic': '/trace',
               'filter': instance,
               'snapshot': True}
    on_message = functools.partial(_filter_by_uniq, out_=rv, uniq=uniq)
    wsc.ws_loop(ws_api, message, True, on_message)

    return rv


def _find_uniq_instance(instance, uniq, ws_api=None):
    """Find the host and container id of a (possibly terminated) instance
    from its trace history; returns {} when nothing can be found."""
    # 'running' is a pseudo-uniq: match any container id in the trace.
    if uniq == 'running':
        uniq = None

    history = _get_instance_trace(instance, uniq, ws_api)
    _LOGGER.debug('Instance %s/%s trace: %s', instance, uniq, history)

    # keep only those items from which uniq can be found out
    history = [item for item in history if hasattr(item, 'uniqueid')]
    if not history:
        return {}

    def get_timestamp(obj):
        """Get the timestamp attribute of the object."""
        return getattr(obj, 'timestamp', None)

    # The most recent trace item decides which host/uniq to use.
    last = max(history, key=get_timestamp)
    _LOGGER.debug("Instance %s's last trace item: %s", instance, last)

    return {'instanceid': last.instanceid,
            'host': getattr(last, 'source', None),
            'uniq': getattr(last, 'uniqueid', None)}


def _instance_to_host(in_=None, out_=None):
    """Update out_ so it contains 'instance: host' as key: value pairs.

    Websocket callback: returns False on the first usable message to stop
    the loop.
    """
    if 'host' not in in_:
        return True

    out_.update({'instanceid': in_['name'],
                 'host': in_['host'],
                 'uniq': 'running'})
    return False


def _find_running_instance(app, ws_api=None):
    """Find the instance name(s) and host(s) corresponding to the app pattern.
    """
    rv = {}
    message = {'topic': '/endpoints',
               'filter': app,
               'proto': 'tcp',
               'endpoint': 'ssh',
               'snapshot': True}

    on_message = functools.partial(_instance_to_host, out_=rv)
    wsc.ws_loop(ws_api, message, True, on_message)

    return rv


def init():
    """Return top level command handler."""

    @click.command()
    @click.option('--api',
                  envvar='TREADMILL_STATEAPI',
                  help='State API url to use.',
                  metavar='URL',
                  required=False)
    @click.argument('app-or-svc')
    @click.option('--cell',
                  callback=cli.handle_context_opt,
                  envvar='TREADMILL_CELL',
                  expose_value=False,
                  required=True)
    @click.option('--host',
                  help='Hostname where to look for the logs',
                  required=False)
    @click.option('--service',
                  help='The name of the service for which the logs are '
                       'to be retreived',
                  required=False)
    @click.option('--uniq',
                  default='running',
                  help="The container id. Specify this if you look for a "
                       "not-running (terminated) application's log",
                  required=False)
    @click.option('--ws-api',
                  help='Websocket API url to use.',
                  metavar='URL',
                  required=False)
    def logs(api, app_or_svc, host, service, uniq, ws_api):
        """View application's service logs.

        Arguments are expected to be specified a) either as one string or
        b) parts defined one-by-one ie.:

        a) <appname>/<uniq or running>/service/<servicename>

        b) <appname> --uniq <uniq> --service <servicename>

        Eg.:

        a) proid.foo#1234/xz9474as8/service/my-echo

        b) proid.foo#1234 --uniq xz9474as8 --service my-echo

        For the latest log simply omit 'uniq':

        proid.foo#1234 --service my-echo
        """
        try:
            # Form a): the whole spec packed into one positional argument.
            app, uniq, logtype, logname = app_or_svc.split('/', 3)
        except ValueError:
            # Form b): fall back to the individual options.
            app, uniq, logtype, logname = app_or_svc, uniq, 'service', service

        if logname is None:
            cli.bad_exit("Please specify the 'service' parameter.")

        if host is None:
            instance = None
            if uniq == 'running':
                instance = _find_running_instance(app, ws_api)

            if not instance:
                instance = _find_uniq_instance(app, uniq, ws_api)

            if not instance:
                cli.bad_exit('No {}instance could be found.'.format(
                    'running ' if uniq == 'running' else ''))

            _LOGGER.debug('Found instance: %s', instance)

            host = instance['host']
            uniq = instance['uniq']

        try:
            # Find the nodeinfo endpoint that lives on the target host.
            endpoint, = (
                ep
                for ep in _find_endpoints(
                    urllib.parse.quote('root.*'), 'tcp', 'nodeinfo', api
                )
                if ep['host'] == host
            )
        except ValueError as err:
            _LOGGER.exception(err)
            cli.bad_exit('No endpoint found on %s', host)

        api = 'http://{0}:{1}'.format(endpoint['host'], endpoint['port'])
        logurl = '/app/%s/%s/%s/%s' % (
            urllib.parse.quote(app),
            urllib.parse.quote(uniq),
            logtype,
            urllib.parse.quote(logname))

        log = restclient.get(api, logurl)
        click.echo(log.text)

    return logs
# -*- coding: utf-8 -*-
#
# Glue documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 25 12:05:47 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

import os

# True when the docs are being built on readthedocs.org; used below for
# RTD-specific workarounds (locale) and theme selection.
ON_RTD = os.environ.get('READTHEDOCS') == 'True'

import warnings

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Import matplotlib now to make sure the warning doesn't cause the Sphinx build
# to fail
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    # Force the PyQt v2 sip API before anything Qt-related is imported,
    # then import PyQt5 and matplotlib up front so their import-time
    # warnings are swallowed here rather than failing the build.
    import sip
    sip.setapi('QString', 2)
    sip.setapi('QVariant', 2)
    sip.setapi('QDate', 2)
    sip.setapi('QDateTime', 2)
    sip.setapi('QTextStream', 2)
    sip.setapi('QTime', 2)
    sip.setapi('QUrl', 2)
    import PyQt5
    import matplotlib.pyplot as plt

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',
              'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
              'sphinx_automodapi.automodapi', 'numpydoc',
              'sphinx.ext.intersphinx']

# Add the redirect.py plugin which is in this directory
import sys
sys.path.insert(0, os.path.abspath('.'))
extensions.append('redirect')

# Workaround for RTD where the default encoding is ASCII
if ON_RTD:
    import locale
    locale.setlocale(locale.LC_ALL, 'C.UTF-8')

intersphinx_cache_limit = 10     # days to keep the cached inventories
intersphinx_mapping = {
    'sphinx': ('http://www.sphinx-doc.org/en/latest/', None),
    'python': ('https://docs.python.org/2.7', None),
    'matplotlib': ('http://matplotlib.org', None),
    'numpy': ('https://docs.scipy.org/doc/numpy', None),
    'astropy': ('http://docs.astropy.org/en/stable/', None),
    'echo': ('http://echo.readthedocs.io/en/latest/', None),
}

numpydoc_show_class_members = False
autosummary_generate = True

# Directory into which sphinx-automodapi writes the generated API pages.
automodapi_toctreedirnm = 'api'

# At the moment, sphinx-automodapi causes a warning to appear about autoattribute being
# registered twice, but this will be fixed in the next release.
suppress_warnings = ['app.add_directive', 'app.add_node']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Glue'
copyright = u'2012-2017, Chris Beaumont, Thomas Robitaille, Michelle Borkin'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from glue import __version__

# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates', '.eggs']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
try:  # use ReadTheDocs theme, if installed
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), ]
except ImportError:
    # Fall back to Sphinx's default theme when sphinx_rtd_theme is absent.
    pass

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Gluedoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Glue.tex', u'Glue Documentation',
     u'Chris Beaumont, Thomas Robitaille, Michelle Borkin', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'glue', u'Glue Documentation',
     [u'Chris Beaumont, Thomas Robitaille, Michelle Borkin'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Glue', u'Glue Documentation',
     u'Chris Beaumont, Thomas Robitaille, Michelle Borkin', 'Glue',
     'One line description of project.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

todo_include_todos = True
autoclass_content = 'both'

# Cross-reference targets that Sphinx cannot resolve (external Qt classes,
# builtins, and a few glue-internal names); silence the nitpicky warnings
# for these instead of failing the build.
nitpick_ignore = [('py:class', 'object'), ('py:class', 'str'),
                  ('py:class', 'list'), ('py:obj', 'numpy array'),
                  ('py:obj', 'integer'), ('py:obj', 'Callable'),
                  ('py:obj', 'list'),
                  ('py:class', 'PySide.QtGui.QMainWindow'),
                  ('py:class', 'PySide.QtGui.QWidget'),
                  ('py:class', 'PyQt4.QtGui.QTextEdit'),
                  ('py:class', 'PyQt4.QtGui.QTabBar'),
                  ('py:class', 'PyQt4.QtGui.QLabel'),
                  ('py:class', 'PyQt4.QtGui.QComboBox'),
                  ('py:class', 'PyQt4.QtGui.QMessageBox'),
                  ('py:class', 'PyQt4.QtGui.QToolBar'),
                  ('py:class', 'PyQt4.QtCore.QMimeData'),
                  ('py:class', 'PyQt4.QtCore.QAbstractListModel'),
                  ('py:class', 'PyQt4.QtCore.QThread'),
                  ('py:class', 'PyQt4.QtGui.QStyledItemDelegate'),
                  ('py:class', 'PyQt5.QtWidgets.QMainWindow'),
                  ('py:class', 'PyQt5.QtWidgets.QWidget'),
                  ('py:class', 'PyQt5.QtWidgets.QTextEdit'),
                  ('py:class', 'PyQt5.QtWidgets.QTabBar'),
                  ('py:class', 'PyQt5.QtWidgets.QLabel'),
                  ('py:class', 'PyQt5.QtWidgets.QComboBox'),
                  ('py:class', 'PyQt5.QtWidgets.QMessageBox'),
                  ('py:class', 'PyQt5.QtWidgets.QToolBar'),
                  ('py:class', 'PyQt5.QtWidgets.QStyledItemDelegate'),
                  ('py:class', 'PyQt5.QtCore.QMimeData'),
                  ('py:class', 'PyQt5.QtCore.QAbstractListModel'),
                  ('py:class', 'PyQt5.QtCore.QThread'),
                  ('py:obj', "str ('file' | 'directory' | 'label')"),
                  ('py:obj', 'function(application)'),
                  ('py:class', 'builtins.object'),
                  ('py:class', 'builtins.list'),
                  ('py:class', 'builtins.type'),
                  ('py:class', 'glue.viewers.histogram.layer_artist.HistogramLayerBase'),
                  ('py:class', 'glue.viewers.scatter.layer_artist.ScatterLayerBase'),
                  ('py:class', 'glue.viewers.image.layer_artist.ImageLayerBase'),
                  ('py:class', 'glue.viewers.image.layer_artist.RGBImageLayerBase'),
                  ('py:class', 'PyQt4.QtGui.QMainWindow'),
                  ('py:class', 'PyQt4.QtGui.QWidget'),
                  ('py:mod', 'glue.core'),
                  ('py:mod', 'glue.viewers'),
                  ('py:mod', 'glue.viewers.scatter'),
                  ('py:mod', 'glue.viewers.common'),
                  ('py:mod', 'glue.viewers.common.qt.mouse_mode'),
                  ('py:mod', 'glue.dialogs.custom_component'),
                  ('py:class', 'glue.external.echo.core.HasCallbackProperties'),
                  ('py:class', 'glue.external.echo.core.CallbackProperty'),
                  ('py:class', 'glue.external.echo.selection.SelectionCallbackProperty'),
                  ('py:class', 'glue.viewers.image.state.BaseImageLayerState'),
                  ('py:class', 'glue.viewers.common.qt.data_viewer_with_state.DataViewerWithState')
                  ]

# coax Sphinx into treating descriptors as attributes
# see https://bitbucket.org/birkenfeld/sphinx/issue/1254/#comment-7587063
from glue.utils.qt.widget_properties import WidgetProperty
WidgetProperty.__get__ = lambda self, *args, **kwargs: self

viewcode_import = False
"""
HTTP/1.x message reading: parse request/response heads, message bodies
(fixed-length, chunked, or read-until-close), and predict body sizes per
RFC 7230 section 3.3.
"""
import re
import sys
import time
import typing

from mitmproxy import exceptions
from mitmproxy.net.http import headers
from mitmproxy.net.http import request
from mitmproxy.net.http import response
from mitmproxy.net.http import url


def get_header_tokens(headers, key):
    """
    Retrieve all tokens for a header key. A number of different headers
    follow a pattern where each header line can contain comma-separated
    tokens, and headers can be set multiple times.
    """
    if key not in headers:
        return []
    tokens = headers[key].split(",")
    return [token.strip() for token in tokens]


def read_request(rfile, body_size_limit=None):
    """
    Read a complete HTTP request (head + body) from *rfile* and return
    a request object with its content populated and timestamp_end set.
    """
    request = read_request_head(rfile)
    expected_body_size = expected_http_body_size(request)
    request.data.content = b"".join(read_body(rfile, expected_body_size, limit=body_size_limit))
    request.timestamp_end = time.time()
    return request


def read_request_head(rfile):
    """
    Parse an HTTP request head (request line + headers) from an input stream

    Args:
        rfile: The input stream

    Returns:
        The HTTP request object (without body)

    Raises:
        exceptions.HttpReadDisconnect: No bytes can be read from rfile.
        exceptions.HttpSyntaxException: The input is malformed HTTP.
        exceptions.HttpException: Any other error occurred.
    """
    timestamp_start = time.time()
    if hasattr(rfile, "reset_timestamps"):
        rfile.reset_timestamps()

    host, port, method, scheme, authority, path, http_version = _read_request_line(rfile)
    headers = _read_headers(rfile)

    if hasattr(rfile, "first_byte_timestamp"):
        # more accurate timestamp_start
        timestamp_start = rfile.first_byte_timestamp

    return request.Request(
        host, port, method, scheme, authority, path, http_version, headers, None, None, timestamp_start, None
    )


def read_response(rfile, request, body_size_limit=None):
    """
    Read a complete HTTP response (head + body) from *rfile*; the matching
    *request* is needed to predict the body size (e.g. HEAD/CONNECT).
    """
    response = read_response_head(rfile)
    expected_body_size = expected_http_body_size(request, response)
    response.data.content = b"".join(read_body(rfile, expected_body_size, body_size_limit))
    response.timestamp_end = time.time()
    return response


def read_response_head(rfile):
    """
    Parse an HTTP response head (response line + headers) from an input stream

    Args:
        rfile: The input stream

    Returns:
        The HTTP request object (without body)

    Raises:
        exceptions.HttpReadDisconnect: No bytes can be read from rfile.
        exceptions.HttpSyntaxException: The input is malformed HTTP.
        exceptions.HttpException: Any other error occurred.
    """
    timestamp_start = time.time()
    if hasattr(rfile, "reset_timestamps"):
        rfile.reset_timestamps()

    http_version, status_code, message = _read_response_line(rfile)
    headers = _read_headers(rfile)

    if hasattr(rfile, "first_byte_timestamp"):
        # more accurate timestamp_start
        timestamp_start = rfile.first_byte_timestamp

    return response.Response(http_version, status_code, message, headers, None, None, timestamp_start, None)


def read_body(rfile, expected_size, limit=None, max_chunk_size=4096):
    """
    Read an HTTP message body

    Args:
        rfile: The input stream
        expected_size: The expected body size (see :py:meth:`expected_body_size`)
        limit: Maximum body size
        max_chunk_size: Maximum chunk size that gets yielded

    Returns:
        A generator that yields byte chunks of the content.

    Raises:
        exceptions.HttpException, if an error occurs

    Caveats:
        max_chunk_size is not considered if the transfer encoding is chunked.
    """
    # Normalize "no limit" (None / 0 / negative) to the largest usable int.
    if not limit or limit < 0:
        limit = sys.maxsize
    if not max_chunk_size:
        max_chunk_size = limit

    if expected_size is None:
        # Chunked transfer encoding: sizes come from the wire.
        for x in _read_chunked(rfile, limit):
            yield x
    elif expected_size >= 0:
        # Known Content-Length: refuse oversized bodies up front.
        if limit is not None and expected_size > limit:
            raise exceptions.HttpException(
                "HTTP Body too large. "
                "Limit is {}, content length was advertised as {}".format(limit, expected_size)
            )
        bytes_left = expected_size
        while bytes_left:
            chunk_size = min(bytes_left, max_chunk_size)
            content = rfile.read(chunk_size)
            if len(content) < chunk_size:
                raise exceptions.HttpException("Unexpected EOF")
            yield content
            bytes_left -= chunk_size
    else:
        # expected_size == -1: read until the stream closes, up to `limit`.
        bytes_left = limit
        while bytes_left:
            chunk_size = min(bytes_left, max_chunk_size)
            content = rfile.read(chunk_size)
            if not content:
                return
            yield content
            bytes_left -= chunk_size
        # If one more byte is available, the body exceeded the limit.
        not_done = rfile.read(1)
        if not_done:
            raise exceptions.HttpException("HTTP body too large. Limit is {}.".format(limit))


def connection_close(http_version, headers):
    """
    Checks the message to see if the client connection should be closed
    according to RFC 2616 Section 8.1.
    If we don't have a Connection header, HTTP 1.1 connections are assumed
    to be persistent.
    """
    # An explicit Connection header wins over the version-based default.
    if "connection" in headers:
        tokens = get_header_tokens(headers, "connection")
        if "close" in tokens:
            return True
        elif "keep-alive" in tokens:
            return False

    # http_version may be either str or bytes here, so test both forms.
    return http_version != "HTTP/1.1" and http_version != b"HTTP/1.1"


def expected_http_body_size(
        request: request.Request,
        response: typing.Optional[response.Response] = None,
        expect_continue_as_0: bool = True
):
    """
    Args:
        - expect_continue_as_0: If true, incorrectly predict a body size of 0
          for requests which are waiting for a 100 Continue response.

    Returns:
        The expected body length:
        - a positive integer, if the size is known in advance
        - None, if the size in unknown in advance (chunked encoding)
        - -1, if all data should be read until end of stream.

    Raises:
        exceptions.HttpSyntaxException, if the content length header is invalid
    """
    # Determine response size according to
    # http://tools.ietf.org/html/rfc7230#section-3.3
    if not response:
        headers = request.headers
        if expect_continue_as_0 and headers.get("expect", "").lower() == "100-continue":
            return 0
    else:
        headers = response.headers
        # Responses to HEAD, 1xx, 204, 304, and a successful CONNECT never
        # carry a body regardless of their header fields (RFC 7230 3.3.3).
        if request.method.upper() == "HEAD":
            return 0
        if 100 <= response.status_code <= 199:
            return 0
        if response.status_code == 200 and request.method.upper() == "CONNECT":
            return 0
        if response.status_code in (204, 304):
            return 0

    if "chunked" in headers.get("transfer-encoding", "").lower():
        return None
    if "content-length" in headers:
        try:
            # Multiple Content-Length headers are only acceptable when they
            # all agree; conflicting values are a smuggling vector.
            sizes = headers.get_all("content-length")
            different_content_length_headers = any(x != sizes[0] for x in sizes)
            if different_content_length_headers:
                raise exceptions.HttpSyntaxException("Conflicting Content Length Headers")
            size = int(sizes[0])
            if size < 0:
                raise ValueError()
            return size
        except ValueError as e:
            raise exceptions.HttpSyntaxException("Unparseable Content Length") from e
    if not response:
        # Requests without a body indicator have no body.
        return 0
    # Responses without a size indicator: body runs until connection close.
    return -1


def _get_first_line(rfile):
    """
    Read the first non-empty line of a message, skipping at most one
    leading CRLF left over from the previous message on the connection.
    """
    try:
        line = rfile.readline()
        if line == b"\r\n" or line == b"\n":
            # Possible leftover from previous message
            line = rfile.readline()
    except (exceptions.TcpDisconnect, exceptions.TlsException):
        raise exceptions.HttpReadDisconnect("Remote disconnected")
    if not line:
        raise exceptions.HttpReadDisconnect("Remote disconnected")
    return line.strip()


def _read_request_line(rfile):
    """
    Parse the request line, handling origin-form (`GET /path`),
    CONNECT authority-form, and absolute-form targets.

    Returns:
        (host, port, method, scheme, authority, path, http_version)
    """
    try:
        line = _get_first_line(rfile)
    except exceptions.HttpReadDisconnect:
        # We want to provide a better error message.
        raise exceptions.HttpReadDisconnect("Client disconnected")

    try:
        method, target, http_version = line.split()

        if target == b"*" or target.startswith(b"/"):
            # origin-form (or asterisk-form): no authority information.
            scheme, authority, path = b"", b"", target
            host, port = "", 0
        elif method == b"CONNECT":
            # authority-form: target is host:port, no path.
            scheme, authority, path = b"", target, b""
            host, port = url.parse_authority(authority, check=True)
            if not port:
                raise ValueError
        else:
            # absolute-form: scheme://authority/path
            scheme, rest = target.split(b"://", maxsplit=1)
            authority, path_ = rest.split(b"/", maxsplit=1)
            path = b"/" + path_
            host, port = url.parse_authority(authority, check=True)
            port = port or url.default_port(scheme)
            if not port:
                raise ValueError
            # TODO: we can probably get rid of this check?
            url.parse(target)

        _check_http_version(http_version)
    except ValueError:
        raise exceptions.HttpSyntaxException(f"Bad HTTP request line: {line}")

    return host, port, method, scheme, authority, path, http_version


def _read_response_line(rfile):
    """
    Parse the status line.

    Returns:
        (http_version, status_code, message)
    """
    try:
        line = _get_first_line(rfile)
    except exceptions.HttpReadDisconnect:
        # We want to provide a better error message.
        raise exceptions.HttpReadDisconnect("Server disconnected")

    try:
        parts = line.split(None, 2)
        if len(parts) == 2:  # handle missing message gracefully
            parts.append(b"")

        http_version, status_code, message = parts
        status_code = int(status_code)
        _check_http_version(http_version)
    except ValueError:
        raise exceptions.HttpSyntaxException("Bad HTTP response line: {}".format(line))

    return http_version, status_code, message


def _check_http_version(http_version):
    """Raise HttpSyntaxException unless http_version looks like HTTP/x.y."""
    if not re.match(br"^HTTP/\d\.\d$", http_version):
        raise exceptions.HttpSyntaxException("Unknown HTTP version: {}".format(http_version))


def _read_headers(rfile):
    """
    Read a set of headers.
    Stop once a blank line is reached.

    Returns:
        A headers object

    Raises:
        exceptions.HttpSyntaxException
    """
    ret = []
    while True:
        line = rfile.readline()
        if not line or line == b"\r\n" or line == b"\n":
            # we do have coverage of this, but coverage.py does not detect it.
            break  # pragma: no cover
        if line[0] in b" \t":
            # Obsolete line folding: continuation of the previous header.
            if not ret:
                raise exceptions.HttpSyntaxException("Invalid headers")
            # continued header
            ret[-1] = (ret[-1][0], ret[-1][1] + b'\r\n ' + line.strip())
        else:
            try:
                name, value = line.split(b":", 1)
                value = value.strip()
                if not name:
                    raise ValueError()
                ret.append((name, value))
            except ValueError:
                raise exceptions.HttpSyntaxException(
                    "Invalid header line: %s" % repr(line)
                )
    return headers.Headers(ret)


def _read_chunked(rfile, limit=sys.maxsize):
    """
    Read a HTTP body with chunked transfer encoding.

    Args:
        rfile: the input file
        limit: A positive integer
    """
    total = 0
    while True:
        # Chunk-size lines are short; cap the readline to avoid unbounded reads.
        line = rfile.readline(128)
        if line == b"":
            raise exceptions.HttpException("Connection closed prematurely")
        if line != b"\r\n" and line != b"\n":
            try:
                length = int(line, 16)
            except ValueError:
                raise exceptions.HttpSyntaxException("Invalid chunked encoding length: {}".format(line))
            total += length
            if total > limit:
                raise exceptions.HttpException(
                    "HTTP Body too large. Limit is {}, "
                    "chunked content longer than {}".format(limit, total)
                )
            chunk = rfile.read(length)
            # Each chunk must be terminated by CRLF (read at most 5 bytes).
            suffix = rfile.readline(5)
            if suffix != b"\r\n":
                raise exceptions.HttpSyntaxException("Malformed chunked body")
            if length == 0:
                # Zero-length chunk marks the end of the body.
                return
            yield chunk
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
""" The portalpy provisioning package for working with the ArcGIS Online API."""

import copy
import csv
import json
import logging
import os
import shutil
import tempfile

from portalpy import TEXT_BASED_ITEM_TYPES, FILE_BASED_ITEM_TYPES, PortalError,\
    unicode_to_ascii

# Item/group properties that are carried over when copying between portals.
ITEM_COPY_PROPERTIES = ['title', 'type', 'typekeywords', 'description', 'tags',
                        'snippet', 'extent', 'spatialreference', 'name',
                        'accessinformation', 'licenseinfo', 'culture', 'url', ]

GROUP_COPY_PROPERTIES = ['title', 'description', 'tags', 'snippet', 'phone',
                         'access', 'isInvitationOnly']
GROUP_EXTRACT_PROPERTIES = ['id'] + GROUP_COPY_PROPERTIES

# Module-level switch: set False to keep the temp directories for debugging.
clean_temp_files = True

_log = logging.getLogger(__name__)


def copy_items(items, source, target, target_user, target_folder=None,
               relationships=None, work_dir=tempfile.gettempdir()):
    """ Copy items from the source portal to the target portal.

    Returns a dict mapping source item ids to target item ids.
    NOTE(review): the work_dir default is evaluated once at import time,
    not per call — acceptable here since gettempdir() is stable.
    """
    if not target.is_logged_in():
        raise PortalError('Must be logged into target portal to copy')

    # Make sure the folder exists (or gets created) on the target portal
    target_folder_id = None
    if target_folder:
        target_folder_id = _get_or_create_folder(target, target_user,
                                                 target_folder)

    # Create a temporary folder to use for copying items.
    copy_dir = tempfile.mkdtemp(prefix='copy_items_',
                                dir=unicode_to_ascii(work_dir))
    try:
        # Copy the items
        copied_items = _copy_items(items, source, target, target_user,
                                   target_folder_id, None, copy_dir)

        # Copy the related items (if specified)
        if relationships:
            related_items = _copy_relationships(copied_items, source, target,
                                                target_user, target_folder_id,
                                                relationships, copy_dir)
            copied_items.update(related_items)
    finally:
        if clean_temp_files:
            shutil.rmtree(copy_dir)

    return copied_items


def copy_user_contents(source, source_user, target, target_user, ids=None,
                       relationships=None, work_dir=tempfile.gettempdir()):
    """ Copy a user's items from the source portal to the target portal.

    Mirrors the source user's folder structure on the target, optionally
    restricted to the item ids in `ids`. Returns a dict mapping source
    item ids to target item ids.
    """
    if not source.is_logged_in():
        raise PortalError('Must be logged into source portal to copy a '\
                          + 'user\'s contents')
    if not target.is_logged_in():
        raise PortalError('Must be logged into target portal to copy a '\
                          + 'user\'s contents')

    # Get the user's content
    root_items, folders = source.user_contents(source_user)

    # Create a temporary folder to use for copying items.
    copy_dir = tempfile.mkdtemp(prefix='copy_user_content_',
                                dir=unicode_to_ascii(work_dir))
    try:
        # Copy the items in the root folder
        copied_items = _copy_items(root_items, source, target, target_user,
                                   None, ids, copy_dir)

        # Loop over all of the folders in the source portal, and get or create
        # the corresponding folder in the target portal
        for folder_id, folder_title, items in folders:
            target_folder_id = _get_or_create_folder(target, target_user,
                                                     folder_title)
            copied_folder_items = _copy_items(items, source, target,
                                              target_user, target_folder_id,
                                              ids, copy_dir)
            copied_items.update(copied_folder_items)

        # Copy the related items (if specified)
        if relationships:
            related_items = _copy_relationships(copied_items, source, target,
                                                target_user, None,
                                                relationships, copy_dir)
            copied_items.update(related_items)
    finally:
        if clean_temp_files:
            shutil.rmtree(copy_dir)

    return copied_items


def _copy_items(items, source, target, target_user, target_folder_id, ids,
                copy_dir):
    """ Copy each item (optionally filtered by `ids`); returns a dict of
    source id -> target id for the items that copied successfully.
    Relationships are intentionally not followed here (passed as None).
    """
    copied_items = dict()
    for item in items:
        itemid = item['id']
        if not ids or itemid in ids:
            target_itemid = _copy_item(item, source, target, target_user,
                                       target_folder_id, None, copy_dir)[0]
            if target_itemid:
                copied_items[itemid] = target_itemid
    return copied_items


def _copy_item(item, source, target, target_user, target_folder_id,
               relationships, copy_dir):
    """ Copy a single item (text or file based, plus thumbnail/metadata).

    Returns (target_itemid, {source_id: target_id, ...}) on success, or
    (None, None) if the copy failed (failures are logged, not raised).
    """
    itemid = item['id']
    item_dir = os.path.join(copy_dir, itemid)
    os.makedirs(item_dir)

    try:
        # Create a new items with the subset of properties we want to
        # copy to the target portal
        target_item = _select_properties(item, ITEM_COPY_PROPERTIES)

        # If its a text-based item, then read the text and
        # add it to the request. Otherwise treat it as a
        # file-based item, download it and add to the request
        # as a file
        data_file = None
        if item['type'] in TEXT_BASED_ITEM_TYPES:
            text = source.item_data(itemid)
            if text and len(text) > 0:
                target_item['text'] = text
        elif item['type'] in FILE_BASED_ITEM_TYPES:
            # item_datad downloads the item's data file into item_dir.
            data_file = source.item_datad(itemid, item_dir, item.get('name'))

        # Handle the thumbnail (if one exists)
        thumbnail_file = None
        if 'thumbnail' in item:
            thumbnail_file = source.item_thumbnaild(
                itemid, item_dir, unicode_to_ascii(item['thumbnail']))

        # Handle the metadata (if it exists)
        metadata_file = source.item_metadatad(itemid, item_dir)

        # Add the item to the target portal
        target_itemid = target.add_item(
            target_item, data_file, thumbnail_file, metadata_file, target_user,
            unicode_to_ascii(target_folder_id))
        if target_itemid:
            _log.info('Copied item ' + itemid + ' in source portal '
                      + 'to ' + target_itemid + ' in target portal')

            # We're returning a mapping of source id to target (dict).
            # But before we return, handle the related items (if specified)
            copied_items = dict({itemid: target_itemid})
            if relationships:
                related_items = _copy_relationships(copied_items, source,
                                                    target, target_user,
                                                    target_folder_id,
                                                    relationships, copy_dir)
                copied_items.update(related_items)
            return target_itemid, copied_items
        else:
            _log.warning('Item ' + itemid + ' was not copied '\
                         + 'to target portal')

    # Just log IOErrors (includes HTTPErrors)
    except IOError as e:
        _log.warning('Item ' + itemid + ' was not copied to target portal ' \
                     + '(' + str(e) + ')')

    # Clean up the item directories as we go
    finally:
        if clean_temp_files:
            shutil.rmtree(item_dir)

    # Return an empty tuple, if for some reason the copy didn't happen
    return None, None


def _copy_relationships(items, source, target, target_user, target_folder_id,
                        relationships, work_dir):
    """ Copy items related to the already-copied `items` (a source->target id
    map) and recreate the relationships on the target portal.

    Mutates `items` in place as related items are copied; returns a dict of
    only the newly copied related items.
    """
    all_copied_rel_items = dict()
    for source_id, target_id in items.items():

        # Get the items related to source_id in the source portal
        related_items = source.related_items(source_id, relationships)
        for source_rel_item, rel_type, rel_direction in related_items:
            source_rel_id = source_rel_item['id']

            # See if it's already been copied to the target
            target_rel_id = items.get(source_rel_id)
            if not target_rel_id:
                # If not, then copy it to the target
                target_rel_id, copied_rel_items =\
                    _copy_item(source_rel_item, source, target, target_user,
                               target_folder_id, relationships, work_dir)
                if copied_rel_items:
                    all_copied_rel_items.update(copied_rel_items)
                    items.update(copied_rel_items)

            # Add the relationship
            if target_rel_id:
                target.add_relationship(target_user, target_id, target_rel_id,
                                        rel_type)
            else:
                _log.warning('Unable to copy related item ' + source_rel_id\
                             + ' to target portal')

    return all_copied_rel_items


def _select_properties(properties, property_names):
    """ Return a dict holding only the named properties (skipping ones that
    are None), with values normalized through unicode_to_ascii. """
    selected = dict()
    for property_name in property_names:
        property_value = properties.get(property_name)
        if property_value is not None:
            selected[property_name] = unicode_to_ascii(property_value)
    return selected


def _get_or_create_folder(portal, owner, folder_title):
    """ Return the id of the owner's folder with the given title, creating
    the folder if it doesn't exist yet. Returns None if creation fails. """
    for folder in portal.folders(owner):
        if folder['title'] == folder_title:
            return folder['id']
    new_folder = portal.create_folder(owner, folder_title)
    if new_folder:
        return new_folder['id']


def copy_groups(groups, source, target, target_owner=None,
                work_dir=tempfile.gettempdir()):
    """ Copy group from the source portal to the target portal.

    Returns a dict mapping source group ids to target group ids. If
    target_owner differs from the logged-in target user, ownership of each
    created group is reassigned and the logged-in user leaves the group.
    """
    if not target.is_logged_in():
        raise PortalError('Must be logged into target portal to copy')

    # Create the temporary directory to use for copying groups (required
    # for thumbnails)
    copy_dir = tempfile.mkdtemp(prefix='copy_groups_', dir=work_dir)

    # Loop over each of the groups, copying one at a time
    copied_groups = dict()
    try:
        for group in groups:
            groupid = group['id']
            group_dir = os.path.join(copy_dir, groupid)
            os.makedirs(group_dir)

            # Create a new groups with the subset of properties we want to
            # copy to the target portal. Handle switching between org and
            # public access when going from an org in a multitenant portal
            # and a single tenant portal
            target_group = _select_properties(group, GROUP_COPY_PROPERTIES)
            if target_group['access'] == 'org'\
                    and not target.is_multitenant():
                target_group['access'] = 'public'
            elif target_group['access'] == 'public'\
                    and not source.is_multitenant()\
                    and target.is_multitenant() and target.is_org():
                target_group['access'] = 'org'

            # Handle the thumbnail (if one exists)
            thumbnail_file = None
            if 'thumbnail' in group:
                thumbnail_file = source.group_thumbnaild(
                    groupid, group_dir, group['thumbnail'])

            # Create the group in the target portal
            target_groupid = target.create_group(target_group, thumbnail_file)

            # If the group was created successfully, handling reassigning
            # the group (if the target_owner is specified and it's
            # different from the logged in user of the target portal
            if target_groupid:
                target_username = target.logged_in_user()['username']
                if target_owner and (target_owner != target_username):
                    target.reassign_group(target_groupid, target_owner)
                    target.leave_group(target_groupid)
                copied_groups[groupid] = target_groupid
                _log.info('Copied group ' + groupid + ' in source portal '
                          + 'to ' + target_groupid + ' in target portal')
            else:
                _log.warning('Group ' + groupid + ' was not copied '\
                             + 'to target portal')

            # Clean up the group directories as we go
            if clean_temp_files:
                shutil.rmtree(group_dir)

    # Make sure we clean up the whole copy folder
    finally:
        if clean_temp_files:
            shutil.rmtree(copy_dir)

    return copied_groups


class JSONSerializer(object):
    """ A class for serializing users, groups, and items to JSON."""

    def __init__(self, data=True, metadata=True, thumbnails=True,
                 indent=None):
        """ The JSONSerializer constructor.

        data/metadata/thumbnails toggle downloading those artifacts
        alongside the JSON; indent is passed to json.dump.
        """
        self.data = data
        self.metadata = metadata
        self.thumbnails = thumbnails
        self.indent = indent

    def serialize_groups(self, groups, path, portal=None):
        """ Serialize groups to JSON.

        Writes one subdirectory per group (named by group id) containing
        group.json and, optionally, the thumbnail downloaded from `portal`.
        """
        base_dir = os.path.abspath(path)
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        elif not os.path.isdir(base_dir):
            base_dir = os.path.dirname(base_dir)
        for group in groups:
            group_dir = os.path.join(base_dir, group['id'])
            if not os.path.exists(group_dir):
                os.makedirs(group_dir)

            # Write the thumbnail to a file (per the name specified in the item)
            if self.thumbnails:
                if not portal:
                    raise PortalError('The "portal" argument is required to '\
                                      + 'download thumbnails')
                thumbnail = group.get('thumbnail')
                if thumbnail:
                    portal.item_thumbnaild(group['id'], group_dir, thumbnail)

            # Write the group itself to a file
            self.to_file(group, os.path.join(group_dir, 'group.json'))

    def serialize_items(self, items, path, portal=None):
        """ Serialize items to JSON.

        Writes one subdirectory per item (named by item id) containing
        item.json and, depending on the constructor flags, the thumbnail,
        data (inline text or a downloaded file), and metadata.xml.
        """
        base_dir = os.path.abspath(path)
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        elif not os.path.isdir(base_dir):
            base_dir = os.path.dirname(base_dir)
        for item in items:
            item_dir = os.path.join(base_dir, item['id'])
            if not os.path.exists(item_dir):
                os.makedirs(item_dir)

            # Write the thumbnail to a file (per the name specified in the item)
            if self.thumbnails:
                if not portal:
                    raise PortalError('The "portal" argument is required to '\
                                      + 'download thumbnails')
                thumbnail = item.get('thumbnail')
                if thumbnail:
                    portal.item_thumbnaild(item['id'], item_dir, thumbnail)

            # Handle the data
            if self.data:
                if not portal:
                    raise PortalError('The "portal" argument is required to '\
                                      + 'download data')
                if item['type'] in TEXT_BASED_ITEM_TYPES:
                    # Text-based data is embedded into the item JSON itself.
                    text = portal.item_data(item['id'])
                    if text and len(text) > 0:
                        item['text'] = text
                elif item['type'] in FILE_BASED_ITEM_TYPES:
                    data_dir = os.path.join(item_dir, 'data')
                    if not os.path.exists(data_dir):
                        os.makedirs(data_dir)
                    portal.item_datad(item['id'], data_dir, item.get('name'))

            # Write the metadata to a file
            if self.metadata:
                if not portal:
                    raise PortalError('The "portal" argument is required to '\
                                      + 'download metadata')
                portal.item_metadatad(item['id'], item_dir)

            # Write the item itself to a file (do this at the end, as the data
            # will get writen to the item if the item type is text)
            self.to_file(item, os.path.join(item_dir, 'item.json'))

    def to_file(self, data, path):
        # Serialize `data` as JSON to `path` using the configured indent.
        with open(path, 'w') as outfile:
            json.dump(data, outfile, indent=self.indent)


class JSONDeserializer(object):
    """ A class for deserializing users, groups, and items from JSON."""

    def deserialize_groups(self, path):
        """ Deserialize groups from JSON.

        NOTE(review): despite the docstring, this reads a CSV file via
        csv.DictReader — confirm whether JSON input was intended.
        """
        groups = []
        group_reader = csv.DictReader(open(path, "rb"))
        for group in group_reader:
            groups.append(group)
        return groups

    def deserialize_items(self, path):
        """ Deserialize items from JSON.

        Scans each subdirectory of `path` for item.json plus optional
        thumbnail, data file, and metadata.xml. Returns a list of
        (item, thumbnail_path, data_path, metadata_path) tuples, or None
        if `path` doesn't exist.
        """
        items = []
        base_dir = os.path.abspath(path)
        if not os.path.exists(base_dir):
            _log.warn('Path being deserialized doesn\'t exist: ' + base_dir)
            return
        item_dirs = os.listdir(path)
        for item_dir in item_dirs:
            item_path = os.path.join(base_dir, item_dir, 'item.json')
            item = self.from_file(item_path)
            thumbnail_path = None
            thumbnail = item.get('thumbnail')
            if thumbnail:
                thumbnail_filename = os.path.basename(thumbnail)
                thumbnail_path = os.path.join(base_dir, item_dir,
                                              thumbnail_filename)
            data_path = None
            data_dir = os.path.join(base_dir, item_dir, 'data')
            if os.path.exists(data_dir):
                data_filename = item.get('name')
                if not data_filename:
                    data_filename = 'data'
                data_path = os.path.join(data_dir, data_filename)
            metadata_path = os.path.join(base_dir, item_dir, 'metadata.xml')
            if not os.path.exists(metadata_path):
                metadata_path = None
            items.append((item, thumbnail_path, data_path, metadata_path))
        return items

    def deserialize_item(self, path):
        """ Deserialize an item from JSON.

        Single-item variant of deserialize_items: `path` is the item's own
        directory. Returns (item, thumbnail_path, data_path, metadata_path),
        or None if `path` doesn't exist.
        """
        if not os.path.exists(path):
            _log.warn('Path being deserialized doesn\'t exist: ' + path)
            return
        item_path = os.path.join(path, 'item.json')
        item = self.from_file(item_path)
        thumbnail_path = None
        thumbnail = item.get('thumbnail')
        if thumbnail:
            thumbnail_filename = os.path.basename(thumbnail)
            thumbnail_path = os.path.join(path, thumbnail_filename)
        data_path = None
        data_dir = os.path.join(path, 'data')
        if os.path.exists(data_dir):
            data_filename = item.get('name')
            if not data_filename:
                data_filename = 'data'
            data_path = os.path.join(data_dir, data_filename)
        metadata_path = os.path.join(path, 'metadata.xml')
        if not os.path.exists(metadata_path):
            metadata_path = None
        return (item, thumbnail_path, data_path, metadata_path)

    def from_file(self, path):
        # Load and return the JSON document stored at `path`.
        with open(path, 'r') as infile:
            return json.load(infile)


class CSVSerializer(object):
    """ A class for serializing users, groups, and items to CSV."""

    def __init__(self, data=True, metadata=True, thumbnails=True):
        """ The CSVSerializer constructor. """
        self.data = data
        self.metadata = metadata
        self.thumbnails = thumbnails

    def serialize_groups(self, groups, path, portal=None):
        """ Serialize groups to CSV.

        Writes one CSV row per group to `path`; thumbnails (if enabled) are
        downloaded next to the CSV and referenced by relative path.
        """
        # Work on a copy so the thumbnail paths don't mutate caller data.
        groups_copy = copy.deepcopy(groups)
        field_names = GROUP_EXTRACT_PROPERTIES
        if self.thumbnails:
            if not portal:
                raise PortalError('The "portal" argument is required to '\
                                  + 'download thumbnails')
            field_names.append('thumbnail')
            base_dir = os.path.dirname(path)
            for i, group in enumerate(groups):
                if 'thumbnail' in group:
                    group_dir = os.path.join(base_dir, group['id'])
                    thumbnail_path = portal.group_thumbnaild(
                        group['id'], group_dir, group['thumbnail'])
                    groups_copy[i]['thumbnail'] = os.path.relpath(
                        thumbnail_path, base_dir)
        group_writer = csv.DictWriter(open(path, "wb"), field_names)
        group_writer.writeheader()
        group_writer.writerows(groups_copy)


class CSVDeserializer(object):
    """ A class for deserializing users, groups and items from CSV."""

    def deserialize_groups(self, path):
        """ Deserialize groups from CSV.
""" groups = [] if not os.path.isfile(path): raise PortalError('Specific path is not a file: ' + path) group_reader = csv.DictReader(open(path, "rb")) for group in group_reader: groups.append(group) return groups def deserialize_users(self, path): """ Deserialize users from CSV. """ users = [] if not os.path.isfile(path): raise PortalError('Specific path is not a file: ' + path) user_reader = csv.DictReader(open(path, "rb")) for user in user_reader: users.append(user) return users _known_serializers = {'csv': CSVSerializer, 'json': JSONSerializer} _known_deserializers = {'csv': CSVDeserializer, 'json': JSONDeserializer} def _select_deserializer(f, cls, **kw): if not cls: cls = _known_deserializers.get(f) if not cls: raise PortalError('Unsupported format \'' + f + '\' for deserialization') return cls(**kw) def _select_serializer(f, cls, **kw): if not cls: cls = _known_serializers.get(f) if not cls: raise PortalError('Unsupported format \'' + f + '\' for serialization') return cls(**kw) def load_users(portal, path, f='json', cls=None, **kw): """ Load users stored on disk into the portal. """ if portal.is_multitenant(): raise PortalError('Loading users into a multi-tenant portal is not ' + 'supported at this time') # Deserialize the users, and then loop over them one at a time to # add them to the portal deserializer = _select_deserializer(f, cls, **kw) users = deserializer.deserialize_users(path) for user in users: # Remove any properties that have no entry for property in list(user.keys()): if user[property] is None: del user[property] # Signup users in the portal using the signup operation portal.signup(user['username'], user['password'], user['fullname'], user.get('email')) return users def load_groups(portal, path, f='json', cls=None, **kw): """ Load groups stored on disk into the portal. 
""" groups, source_ids = [], [] # Deserialize the groups, and then loop over them one at a time to # add them to the portal deserializer = _select_deserializer(f, cls, **kw) dgroups = deserializer.deserialize_groups(path) for dgroup in dgroups: # Pop out some important group properties, which shouldn't be included # in the group dict passed to the create_group function source_id = dgroup.pop('id', None) thumbnail = dgroup.pop('thumbnail', None) owner = dgroup.pop('owner', portal.logged_in_user()['username']) # Remove any properties that have no entry for property in list(dgroup.keys()): if dgroup[property] is None: del dgroup[property] # Create the group, returns the group id in portal # # TODO Consider duplicates (any case for updating) id = portal.create_group(dgroup, thumbnail) # If an ID was returned, add results to return objects if id: group = dict(id=id, owner=owner, **dgroup) groups.append(group) source_ids.append(source_id) return groups, source_ids def load_items(portal, path, f='json', cls=None, **kw): """ Load items stored on disk into the portal. 
""" items, source_ids = [], [] # Deserialize the items, and then loop over them one at a time to # add them to the portal deserializer = _select_deserializer(f, cls, **kw) ditems = deserializer.deserialize_items(path) for ditem_tuple in ditems: ditem = ditem_tuple[0] thumbnail = ditem_tuple[1] data = ditem_tuple[2] metadata = ditem_tuple[3] # Pop out some important item properties, which shouldn't be included # in the item dict passed to the add_item function source_id = ditem.pop('id', None) ditem.pop('owner') ditem.pop('thumbnail') # Remove any properties that have no entry for property in list(ditem.keys()): if ditem[property] is None: del ditem[property] # Add the item, returns the item id in portal # TODO Consider duplicates (any case for updating) id = portal.add_item(ditem, data, thumbnail, metadata) # If an ID was returned, add results to return objects if id: item = dict(id=id, owner=portal.logged_in_user()['username'], **ditem) items.append(item) source_ids.append(source_id) return items, source_ids def load_item(portal, path, overwrite_id=None, f='json', cls=None, **kw): """ Load item stored on disk into the portal. Pass existing item id (overwrite_id) if updating item; otherwise item is added. 
""" # Deserialize the item, and add the item # or update an existing item deserializer = _select_deserializer(f, cls, **kw) ditem_tuple = deserializer.deserialize_item(path) ditem = ditem_tuple[0] thumbnail = ditem_tuple[1] data = ditem_tuple[2] metadata = ditem_tuple[3] # Pop out some important item properties, which shouldn't be included # in the item dict passed to the add_item function source_id = ditem.pop('id', None) ditem.pop('owner') ditem.pop('thumbnail') # Remove any properties that have no entry for property in list(ditem.keys()): if ditem[property] is None: del ditem[property] # Add/Update the item, returns the item id in portal if not overwrite_id: id = portal.add_item(ditem, data, thumbnail, metadata) else: portal.update_item(overwrite_id, ditem, data, metadata, thumbnail) id = overwrite_id # If an ID was returned, add results to return objects if id: item = dict(id=id, owner=portal.logged_in_user()['username'], **ditem) return item, source_id def save_groups(portal, groups, path, f='json', cls=None, **kw): """ Save groups in the portal to disk. """ serializer = _select_serializer(f, cls, **kw) serializer.serialize_groups(groups, path, portal) def save_items(portal, items, path, f='json', cls=None, **kw): """ Save items in the portal to disk. """ serializer = _select_serializer(f, cls, **kw) serializer.serialize_items(items, path, portal)
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""DNS TSIG support."""

import hmac
import struct
import sys

import dns.exception
import dns.hash
import dns.rdataclass
import dns.name

from ._compat import long, string_types, text_type


class BadTime(dns.exception.DNSException):
    """The current time is not within the TSIG's validity time."""


class BadSignature(dns.exception.DNSException):
    """The TSIG signature fails to verify."""


class PeerError(dns.exception.DNSException):
    """Base class for all TSIG errors generated by the remote peer"""


class PeerBadKey(PeerError):
    """The peer didn't know the key we used"""


class PeerBadSignature(PeerError):
    """The peer didn't like the signature we sent"""


class PeerBadTime(PeerError):
    """The peer didn't like the time we sent"""


class PeerBadTruncation(PeerError):
    """The peer didn't like amount of truncation in the TSIG we sent"""


# TSIG Algorithms
HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
HMAC_SHA1 = dns.name.from_text("hmac-sha1")
HMAC_SHA224 = dns.name.from_text("hmac-sha224")
HMAC_SHA256 = dns.name.from_text("hmac-sha256")
HMAC_SHA384 = dns.name.from_text("hmac-sha384")
HMAC_SHA512 = dns.name.from_text("hmac-sha512")

# Maps an algorithm name (a dns.name.Name) to the dns.hash key for the
# corresponding digest implementation
_hashes = {
    HMAC_SHA224: 'SHA224',
    HMAC_SHA256: 'SHA256',
    HMAC_SHA384: 'SHA384',
    HMAC_SHA512: 'SHA512',
    HMAC_SHA1: 'SHA1',
    HMAC_MD5: 'MD5',
}

default_algorithm = HMAC_MD5

# TSIG error codes (RFC 2845 / RFC 4635)
BADSIG = 16
BADKEY = 17
BADTIME = 18
BADTRUNC = 22


def sign(wire, keyname, secret, time, fudge, original_id, error,
         other_data, request_mac, ctx=None, multi=False, first=True,
         algorithm=default_algorithm):
    """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
    for the input parameters, the HMAC MAC calculated by applying the
    TSIG signature algorithm, and the TSIG digest context.

    The order of ctx.update() calls below defines the signed byte stream
    and must not be changed.  "first" distinguishes the first message of a
    multi-message exchange (which signs the full TSIG variables) from
    continuations (which sign only the time fields); "multi" requests a
    chained context seeded with this MAC for the next message.

    @rtype: (string, string, hmac.HMAC object)
    @raises ValueError: I{other_data} is too long
    @raises NotImplementedError: I{algorithm} is not supported
    """
    if isinstance(other_data, text_type):
        other_data = other_data.encode()
    (algorithm_name, digestmod) = get_algorithm(algorithm)
    if first:
        ctx = hmac.new(secret, digestmod=digestmod)
        # A request MAC, when present, is digested length-prefixed
        ml = len(request_mac)
        if ml > 0:
            ctx.update(struct.pack('!H', ml))
            ctx.update(request_mac)
    id = struct.pack('!H', original_id)
    ctx.update(id)
    # Digest the message with the original id substituted for the wire id
    ctx.update(wire[2:])
    if first:
        # TSIG variables: key name, class ANY, TTL 0 (RFC 2845 section 3.4.2)
        ctx.update(keyname.to_digestable())
        ctx.update(struct.pack('!H', dns.rdataclass.ANY))
        ctx.update(struct.pack('!I', 0))
    # The "time signed" field is 48 bits, split 16/32 for struct packing
    long_time = time + long(0)
    upper_time = (long_time >> 32) & long(0xffff)
    lower_time = long_time & long(0xffffffff)
    time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
    pre_mac = algorithm_name + time_mac
    ol = len(other_data)
    if ol > 65535:
        raise ValueError('TSIG Other Data is > 65535 bytes')
    post_mac = struct.pack('!HH', error, ol) + other_data
    if first:
        ctx.update(pre_mac)
        ctx.update(post_mac)
    else:
        # Continuation messages only digest the time fields
        ctx.update(time_mac)
    mac = ctx.digest()
    mpack = struct.pack('!H', len(mac))
    tsig_rdata = pre_mac + mpack + mac + id + post_mac
    if multi:
        # Seed the next message's context with this MAC, length-prefixed
        ctx = hmac.new(secret, digestmod=digestmod)
        ml = len(mac)
        ctx.update(struct.pack('!H', ml))
        ctx.update(mac)
    else:
        ctx = None
    return (tsig_rdata, mac, ctx)


def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
             other_data, request_mac, ctx=None, multi=False, first=True,
             algorithm=default_algorithm):
    # Backwards-compatible alias for sign()
    return sign(wire, keyname, secret, time, fudge, original_id, error,
                other_data, request_mac, ctx, multi, first, algorithm)


def validate(wire, keyname, secret, now, request_mac, tsig_start,
             tsig_rdata, tsig_rdlen, ctx=None, multi=False, first=True):
    """Validate the specified TSIG rdata against the other input parameters.

    Reconstructs the unsigned message (TSIG record removed, ARCOUNT
    decremented), parses the TSIG rdata fields, checks the peer-reported
    error code and the time window, then recomputes the MAC with sign()
    using the algorithm name taken from the wire and compares.

    @raises FormError: The TSIG is badly formed.
    @raises BadTime: There is too much time skew between the client and the
    server.
    @raises BadSignature: The TSIG signature did not validate
    @rtype: hmac.HMAC object"""
    (adcount,) = struct.unpack("!H", wire[10:12])
    if adcount == 0:
        raise dns.exception.FormError
    adcount -= 1
    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
    current = tsig_rdata
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    # Recombine the 16/32-bit halves of the 48-bit "time signed" field
    time = ((upper_time + long(0)) << 32) + (lower_time + long(0))
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    (original_id, error, other_size) = \
        struct.unpack("!HHH", wire[current:current + 6])
    current += 6
    other_data = wire[current:current + other_size]
    current += other_size
    if current != tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    if error != 0:
        if error == BADSIG:
            raise PeerBadSignature
        elif error == BADKEY:
            raise PeerBadKey
        elif error == BADTIME:
            raise PeerBadTime
        elif error == BADTRUNC:
            raise PeerBadTruncation
        else:
            raise PeerError('unknown TSIG error code %d' % error)
    time_low = time - fudge
    time_high = time + fudge
    if now < time_low or now > time_high:
        raise BadTime
    # Re-sign with the algorithm name parsed from the wire (aname)
    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
                                original_id, error, other_data,
                                request_mac, ctx, multi, first, aname)
    if (our_mac != mac):
        raise BadSignature
    return ctx


def get_algorithm(algorithm):
    """Returns the wire format string and the hash module to use for the
    specified TSIG algorithm

    @rtype: (string, hash constructor)
    @raises NotImplementedError: I{algorithm} is not supported
    """
    if isinstance(algorithm, string_types):
        algorithm = dns.name.from_text(algorithm)
    try:
        return (algorithm.to_digestable(),
                dns.hash.hashes[_hashes[algorithm]])
    except KeyError:
        raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                                  " is not supported")


def get_algorithm_and_mac(wire, tsig_rdata, tsig_rdlen):
    """Return the tsig algorithm for the specified tsig_rdata
    @raises FormError: The TSIG is badly formed.
    """
    current = tsig_rdata
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    if current > tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    return (aname, mac)
#!/usr/bin/env python
'''
   Ansible module for zabbix items
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
#   Zabbix item ansible module
#
#
#   Copyright 2015 Red Hat Inc.
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#

# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code

# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection


def exists(content, key='result'):
    ''' Check if key exists in content and content[key] is non-empty. '''
    # dict.get covers both the missing-key and empty-value cases
    # (replaces the Python-2-only dict.has_key)
    return bool(content.get(key))


def get_value_type(value_type):
    '''
    Map a friendly type string onto the Zabbix value_type code.

    Possible Zabbix values:
    0 - numeric float
    1 - character
    2 - log
    3 - numeric unsigned
    4 - text

    Unrecognized strings fall back to 0 (numeric float).
    '''
    vtype = 0
    if 'int' in value_type:
        vtype = 3
    elif 'char' in value_type:
        vtype = 1
    elif 'str' in value_type:
        vtype = 4

    return vtype


def get_app_ids(application_names, app_name_ids):
    ''' Resolve application names to their ids via the app_name_ids map. '''
    applications = []
    if application_names:
        for app in application_names:
            applications.append(app_name_ids[app])

    return applications


def get_template_id(zapi, template_name):
    '''
    Fetch the template matching template_name.

    Returns ([templateid], {app_name: applicationid}) for the first match,
    or ([], {}) when no template was found.
    '''
    template_ids = []
    app_ids = {}
    # Fetch templates by name
    content = zapi.get_content('template', 'get',
                               {'search': {'host': template_name},
                                'selectApplications': ['applicationid', 'name']})
    # Guard against a missing 'result' key AND an empty result list;
    # the previous has_key-only check allowed an IndexError on [0] below.
    if exists(content):
        template_ids.append(content['result'][0]['templateid'])
        for app in content['result'][0]['applications']:
            app_ids[app['name']] = app['applicationid']

    return template_ids, app_ids


def get_multiplier(inval):
    ''' Determine the formula/multiplier pair from the raw module param.

    Returns (None, None) for missing input, (value, True) for a non-zero
    integer, and (None-or-0, False) otherwise.  Note a literal '0' yields
    (0, False), i.e. the multiplier stays disabled.
    '''
    if inval == None or inval == '':
        return None, None

    rval = None
    try:
        rval = int(inval)
    except ValueError:
        pass

    if rval:
        return rval, True

    return rval, False


# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
    '''
    ansible zabbix module for zbx_item
    '''

    module = AnsibleModule(
        argument_spec=dict(
            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
            zbx_debug=dict(default=False, type='bool'),
            name=dict(default=None, type='str'),
            key=dict(default=None, type='str'),
            template_name=dict(default=None, type='str'),
            zabbix_type=dict(default=2, type='int'),
            value_type=dict(default='int', type='str'),
            multiplier=dict(default=None, type='str'),
            description=dict(default=None, type='str'),
            units=dict(default=None, type='str'),
            applications=dict(default=None, type='list'),
            state=dict(default='present', type='str'),
        ),
        #supports_check_mode=True
    )

    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
                                      module.params['zbx_user'],
                                      module.params['zbx_password'],
                                      module.params['zbx_debug']))

    #Set the instance and the template for the rest of the calls
    zbx_class_name = 'item'
    state = module.params['state']

    templateid, app_name_ids = get_template_id(zapi, module.params['template_name'])

    # Fail if a template was not found matching the name
    if not templateid:
        module.exit_json(failed=True,
                         changed=False,
                         results='Error: Could not find template with name %s for item.' %
                         module.params['template_name'],
                         state="Unknown")

    content = zapi.get_content(zbx_class_name,
                               'get',
                               {'search': {'key_': module.params['key']},
                                'selectApplications': 'applicationid',
                                'templateids': templateid,
                               })

    #******#
    # GET
    #******#
    if state == 'list':
        module.exit_json(changed=False, results=content['result'], state="list")

    #******#
    # DELETE
    #******#
    if state == 'absent':
        if not exists(content):
            module.exit_json(changed=False, state="absent")

        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
        module.exit_json(changed=True, results=content['result'], state="absent")

    # Create and Update
    if state == 'present':
        formula, use_multiplier = get_multiplier(module.params['multiplier'])
        params = {'name': module.params.get('name', module.params['key']),
                  'key_': module.params['key'],
                  'hostid': templateid[0],
                  'type': module.params['zabbix_type'],
                  'value_type': get_value_type(module.params['value_type']),
                  'applications': get_app_ids(module.params['applications'], app_name_ids),
                  'formula': formula,
                  'multiplier': use_multiplier,
                  'description': module.params['description'],
                  'units': module.params['units'],
                 }

        # Remove any None valued params.  Iterate a snapshot of the keys,
        # since we pop from the dict while iterating.
        _ = [params.pop(key, None) for key in list(params.keys()) if params[key] is None]

        #******#
        # CREATE
        #******#
        if not exists(content):
            content = zapi.get_content(zbx_class_name, 'create', params)

            if 'error' in content:
                module.exit_json(failed=True,
                                 changed=True,
                                 results=content['error'],
                                 state="present")

            module.exit_json(changed=True, results=content['result'], state='present')

        ########
        # UPDATE
        ########
        # hostid cannot be changed on an existing item
        _ = params.pop('hostid', None)
        differences = {}
        zab_results = content['result'][0]
        for key, value in params.items():

            if key == 'applications':
                # Zabbix returns application objects; compare by id set
                app_ids = [item['applicationid'] for item in zab_results[key]]
                if set(app_ids) != set(value):
                    differences[key] = value

            elif zab_results[key] != value and zab_results[key] != str(value):
                differences[key] = value

        if not differences:
            module.exit_json(changed=False, results=zab_results, state="present")

        # We have differences and need to update
        differences['itemid'] = zab_results['itemid']
        content = zapi.get_content(zbx_class_name, 'update', differences)

        if 'error' in content:
            module.exit_json(failed=True,
                             changed=False,
                             results=content['error'],
                             state="present")

        module.exit_json(changed=True, results=content['result'], state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")

# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets.  This are required
from ansible.module_utils.basic import *

main()
#!/usr/bin/env python2
# Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.

import argparse
from collections import Counter
import logging
import math
import os
import Queue
import random
import re
import shutil
import sys
import threading
import time

# Find the best implementation available
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

import h5py
import lmdb
import numpy as np
import PIL.Image

# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config
from digits import utils, log

# Import digits.config first to set the path to Caffe
import caffe.io
import caffe_pb2

logger = logging.getLogger('digits.tools.create_db')


class Error(Exception):
    """Base class for errors raised by this tool"""
    pass


class BadInputFileError(Error):
    """Input file is empty"""
    pass


class ParseLineError(Error):
    """Failed to parse a line in the input file"""
    pass


class LoadError(Error):
    """Failed to load image[s]"""
    pass


class WriteError(Error):
    """Failed to write image[s]"""
    pass


class Hdf5DatasetExtendError(Error):
    """Failed to extend an hdf5 dataset"""
    pass


class DbWriter(object):
    """
    Abstract class for writing to databases
    """

    def __init__(self, output_dir, image_height, image_width, image_channels):
        # output_dir must not already exist; os.makedirs raises otherwise
        self._dir = output_dir
        os.makedirs(output_dir)
        self._image_height = image_height
        self._image_width = image_width
        self._image_channels = image_channels
        # Number of records written so far across all files
        self._count = 0

    def write_batch(self, batch):
        raise NotImplementedError

    def count(self):
        return self._count


class LmdbWriter(DbWriter):
    # TODO
    pass


class Hdf5Writer(DbWriter):
    """
    A class for writing to HDF5 files
    """
    LIST_FILENAME = 'list.txt'
    DTYPE = 'float32'

    def __init__(self, **kwargs):
        """
        Keyword arguments:
        compression -- the type of dataset compression
        dset_limit -- the dataset size limit
        """
        self._compression = kwargs.pop('compression', None)
        self._dset_limit = kwargs.pop('dset_limit', None)
        super(Hdf5Writer, self).__init__(**kwargs)
        self._db = None
        if self._dset_limit is not None:
            # Max records per file: size limit divided by per-image element
            # count (Python 2 integer division)
            self._max_count = self._dset_limit / (
                self._image_height * self._image_width * self._image_channels)
        else:
            # NOTE(review): with no dset_limit, _max_count stays None and the
            # second and later calls to write_batch compare/subtract against
            # None — presumably callers always pass dset_limit when the data
            # exceeds one batch; verify against create_db's call sites.
            self._max_count = None

    def write_batch(self, batch):
        # Append a batch of (image, label) pairs, rolling over to a new
        # HDF5 file when the current one reaches _max_count records.
        # convert batch to numpy arrays
        if batch[0][0].ndim == 2:
            # add channel axis for grayscale images
            data_batch = np.array([i[0][..., np.newaxis] for i in batch])
        else:
            data_batch = np.array([i[0] for i in batch])
        # Transpose to (channels, height, width)
        data_batch = data_batch.transpose((0, 3, 1, 2))
        label_batch = np.array([i[1] for i in batch])

        # first batch
        if self._db is None:
            self._create_new_file(len(batch))
            self._db['data'][:] = data_batch
            self._db['label'][:] = label_batch
            self._count += len(batch)
            return

        current_count = self._db['data'].len()

        # will fit in current dataset
        if current_count + len(batch) <= self._max_count:
            self._db['data'].resize(current_count + len(batch), axis=0)
            self._db['label'].resize(current_count + len(batch), axis=0)
            self._db['data'][-len(batch):] = data_batch
            self._db['label'][-len(batch):] = label_batch
            self._count += len(batch)
            return

        # calculate how many will fit in current dataset
        split = self._max_count - current_count

        if split > 0:
            # put what we can into the current dataset
            self._db['data'].resize(self._max_count, axis=0)
            self._db['label'].resize(self._max_count, axis=0)
            self._db['data'][-split:] = data_batch[:split]
            self._db['label'][-split:] = label_batch[:split]
            self._count += split

        # remainder goes into a fresh file
        self._create_new_file(len(batch) - split)
        self._db['data'][:] = data_batch[split:]
        self._db['label'][:] = label_batch[split:]
        self._count += len(batch) - split

    def _create_new_file(self, initial_count):
        # Open the next numbered .h5 file, record it in list.txt, and create
        # resizable 'data'/'label' datasets sized for initial_count records.
        assert self._max_count is None or initial_count <= self._max_count, \
            'Your batch size is too large for your dataset limit - %d vs %d' % \
            (initial_count, self._max_count)

        # close the old file
        if self._db is not None:
            self._db.close()
            mode = 'a'
        else:
            mode = 'w'

        # get the filename
        filename = self._new_filename()
        logger.info('Creating HDF5 database at "%s" ...' %
                    os.path.join(*filename.split(os.sep)[-2:]))

        # update the list
        with open(self._list_filename(), mode) as outfile:
            outfile.write('%s\n' % filename)

        # create the new file
        self._db = h5py.File(os.path.join(self._dir, filename), 'w')

        # initialize the datasets
        # maxshape of None means unlimited growth along that axis
        self._db.create_dataset('data',
                                (initial_count, self._image_channels,
                                 self._image_height, self._image_width),
                                maxshape=(self._max_count, self._image_channels,
                                          self._image_height, self._image_width),
                                chunks=True, compression=self._compression, dtype=self.DTYPE)
        self._db.create_dataset('label',
                                (initial_count,),
                                maxshape=(self._max_count,),
                                chunks=True, compression=self._compression, dtype=self.DTYPE)

    def _list_filename(self):
        return os.path.join(self._dir, self.LIST_FILENAME)

    def _new_filename(self):
        # Files are named by the running record count, e.g. 0.h5, 100.h5, ...
        return '%s.h5' % self.count()


def create_db(input_file, output_dir,
              image_width, image_height, image_channels,
              backend,
              resize_mode=None,
              image_folder=None,
              shuffle=True,
              mean_files=None,
              **kwargs):
    """
    Create a database of images from a list of image paths
    Raises exceptions on errors

    Arguments:
    input_file -- a textfile containing labelled image paths
    output_dir -- the location to store the created database
    image_width -- image resize width
    image_height -- image resize height
    image_channels -- image channels
    backend -- the DB format (lmdb/hdf5)

    Keyword arguments:
    resize_mode -- passed to utils.image.resize_image()
    shuffle -- if True, shuffle the images in the list before creating
    mean_files -- a list of mean files to save
    """
    ### Validate arguments

    if not os.path.exists(input_file):
        raise ValueError('input_file does not exist')
    if os.path.exists(output_dir):
        # An existing database is removed and rebuilt from scratch
        logger.warning('removing existing database')
        if os.path.isdir(output_dir):
            shutil.rmtree(output_dir, ignore_errors=True)
        else:
            os.remove(output_dir)
    if image_width <= 0:
        raise ValueError('invalid image width')
    if image_height <= 0:
        raise ValueError('invalid image height')
    if image_channels not in [1, 3]:
        raise ValueError('invalid number of channels')
    if resize_mode not in [None, 'crop', 'squash', 'fill', 'half_crop']:
        raise ValueError('invalid resize_mode')
    if image_folder is not None and not os.path.exists(image_folder):
        raise ValueError('image_folder does not exist')
    if mean_files:
        for mean_file in mean_files:
            if os.path.exists(mean_file):
                logger.warning('overwriting existing mean file "%s"!' % mean_file)
            else:
                dirname = os.path.dirname(mean_file)
                if not dirname:
                    dirname = '.'
                if not os.path.exists(dirname):
                    raise ValueError('Cannot save mean file at "%s"' % mean_file)
    compute_mean = bool(mean_files)

    ### Load lines from input_file into a load_queue

    load_queue = Queue.Queue()
    image_count = _fill_load_queue(input_file, load_queue, shuffle)

    # Start some load threads

    batch_size = _calculate_batch_size(image_count,
                                       bool(backend == 'hdf5'), kwargs.get('hdf5_dset_limit'),
                                       image_channels, image_height, image_width)
    num_threads = _calculate_num_threads(batch_size, shuffle)
    # Bounded queue applies backpressure to the loader threads
    write_queue = Queue.Queue(2 * batch_size)
    summary_queue = Queue.Queue()

    for _ in xrange(num_threads):
        p = threading.Thread(target=_load_thread,
                             args=(load_queue, write_queue, summary_queue,
                                   image_width, image_height, image_channels,
                                   resize_mode, image_folder, compute_mean),
                             kwargs={'backend': backend,
                                     'encoding': kwargs.get('encoding', None)},
                             )
        p.daemon = True
        p.start()

    start = time.time()

    if backend == 'lmdb':
        _create_lmdb(image_count, write_queue, batch_size, output_dir,
                     summary_queue, num_threads,
                     mean_files, **kwargs)
    elif backend == 'hdf5':
        _create_hdf5(image_count, write_queue, batch_size, output_dir,
                     image_width, image_height, image_channels,
                     summary_queue, num_threads,
                     mean_files, **kwargs)
    else:
        raise ValueError('invalid backend')

    logger.info('Database created after %d seconds.' % (time.time() - start))


def _create_lmdb(image_count, write_queue, batch_size, output_dir,
                 summary_queue, num_threads,
                 mean_files=None,
                 encoding=None,
                 lmdb_map_size=None,
                 **kwargs):
    """
    Create an LMDB

    Drains write_queue (filled by the loader threads) in batches and commits
    them to LMDB; each loader thread posts one (count, sum) entry on
    summary_queue when it finishes, which is how completion is detected.

    Keyword arguments:
    encoding -- image encoding format
    lmdb_map_size -- the initial LMDB map size
    """
    wait_time = time.time()
    threads_done = 0
    images_loaded = 0
    images_written = 0
    image_sum = None
    batch = []
    compute_mean = bool(mean_files)

    db = lmdb.open(output_dir,
                   map_size=lmdb_map_size,
                   map_async=True,
                   max_dbs=0)

    while (threads_done < num_threads) or not write_queue.empty():

        # Send update every 2 seconds
        if time.time() - wait_time > 2:
            logger.debug('Processed %d/%d' % (images_written, image_count))
            wait_time = time.time()

        processed_something = False

        if not summary_queue.empty():
            result_count, result_sum = summary_queue.get()
            images_loaded += result_count
            # Update total_image_sum
            if compute_mean and result_count > 0 and result_sum is not None:
                if image_sum is None:
                    image_sum = result_sum
                else:
                    image_sum += result_sum
            threads_done += 1
            processed_something = True

        if not write_queue.empty():
            datum = write_queue.get()
            batch.append(datum)

            if len(batch) == batch_size:
                _write_batch_lmdb(db, batch, images_written)
                images_written += len(batch)
                batch = []
            processed_something = True

        if not processed_something:
            time.sleep(0.2)

    # Flush the final partial batch
    if len(batch) > 0:
        _write_batch_lmdb(db, batch, images_written)
        images_written += len(batch)

    if images_loaded == 0:
        raise LoadError('no images loaded from input file')
    logger.debug('%s images loaded' % images_loaded)

    if images_written == 0:
        raise WriteError('no images written to database')
    logger.info('%s images written to database' % images_written)

    if compute_mean:
        _save_means(image_sum, images_written, mean_files)

    db.close()


def _create_hdf5(image_count, write_queue, batch_size, output_dir,
                 image_width, image_height, image_channels,
                 summary_queue, num_threads,
                 mean_files=None,
                 compression=None,
                 hdf5_dset_limit=None,
                 **kwargs):
    """
    Create an HDF5 file

    Same consumer loop as _create_lmdb, but batches go through Hdf5Writer,
    which handles splitting across multiple .h5 files.

    Keyword arguments:
    compression -- dataset compression format
    """
    wait_time = time.time()
    threads_done = 0
    images_loaded = 0
    images_written = 0
    image_sum = None
    batch = []
    compute_mean = bool(mean_files)

    writer = Hdf5Writer(
        output_dir=output_dir,
        image_height=image_height,
        image_width=image_width,
        image_channels=image_channels,
        dset_limit=hdf5_dset_limit,
        compression=compression,
    )

    while (threads_done < num_threads) or not write_queue.empty():

        # Send update every 2 seconds
        if time.time() - wait_time > 2:
            logger.debug('Processed %d/%d' % (images_written, image_count))
            wait_time = time.time()

        processed_something = False

        if not summary_queue.empty():
            result_count, result_sum = summary_queue.get()
            images_loaded += result_count
            # Update total_image_sum
            if compute_mean and result_count > 0 and result_sum is not None:
                if image_sum is None:
                    image_sum = result_sum
                else:
                    image_sum += result_sum
            threads_done += 1
            processed_something = True

        if not write_queue.empty():
            batch.append(write_queue.get())

            if len(batch) == batch_size:
                writer.write_batch(batch)
                images_written += len(batch)
                batch = []
            processed_something = True

        if not processed_something:
            time.sleep(0.2)

    # Flush the final partial batch
    if len(batch) > 0:
        writer.write_batch(batch)
        images_written += len(batch)

    assert images_written == writer.count()

    if images_loaded == 0:
        raise LoadError('no images loaded from input file')
    logger.debug('%s images loaded' % images_loaded)

    if images_written == 0:
        raise WriteError('no images written to database')
    logger.info('%s images written to database' % images_written)

    if compute_mean:
        _save_means(image_sum, images_written, mean_files)


def _fill_load_queue(filename, queue, shuffle):
    """
    Fill the queue with data from the input file
    Print the category distribution
    Returns the number of lines added to the queue

    NOTE: This can be slow on a large input file, but we need the total image
        count in order to report the progress, so we might as well read it all
    """
    total_lines = 0
    valid_lines = 0
distribution = Counter() with open(filename) as infile: if shuffle: lines = infile.readlines() # less memory efficient random.shuffle(lines) for line in lines: total_lines += 1 try: result = _parse_line(line, distribution) valid_lines += 1 queue.put(result) except ParseLineError: pass else: for line in infile: # more memory efficient total_lines += 1 try: result = _parse_line(line, distribution) valid_lines += 1 queue.put(result) except ParseLineError: pass logger.debug('%s total lines in file' % total_lines) if valid_lines == 0: raise BadInputFileError('No valid lines in input file') logger.info('%s valid lines in file' % valid_lines) for key in sorted(distribution): logger.debug('Category %s has %d images.' % (key, distribution[key])) return valid_lines def _parse_line(line, distribution): """ Parse a line in the input file into (path, label) """ line = line.strip() if not line: raise ParseLineError # Expect format - [/]path/to/file.jpg 123 match = re.match(r'(.+)\s+(\d+)\s*$', line) if match is None: raise ParseLineError path = match.group(1) label = int(match.group(2)) distribution[label] += 1 return path, label def _calculate_batch_size(image_count, is_hdf5=False, hdf5_dset_limit=None, image_channels=None, image_height=None, image_width=None): """ Calculates an appropriate batch size for creating this database """ if is_hdf5 and hdf5_dset_limit is not None: return min(100, image_count, hdf5_dset_limit/(image_channels*image_height*image_width)) else: return min(100, image_count) def _calculate_num_threads(batch_size, shuffle): """ Calculates an appropriate number of threads for creating this database """ if shuffle: return min(10, int(round(math.sqrt(batch_size)))) else: #XXX This is the only way to preserve order for now # This obviously hurts performance considerably return 1 def _load_thread(load_queue, write_queue, summary_queue, image_width, image_height, image_channels, resize_mode, image_folder, compute_mean, backend=None, encoding=None): """ Consumes 
items in load_queue Produces items to write_queue Stores cumulative results in summary_queue """ images_added = 0 if compute_mean: image_sum = _initial_image_sum(image_width, image_height, image_channels) else: image_sum = None while not load_queue.empty(): try: path, label = load_queue.get(True, 0.05) except Queue.Empty: continue # prepend path with image_folder, if appropriate if not utils.is_url(path) and image_folder and not os.path.isabs(path): path = os.path.join(image_folder, path) try: image = utils.image.load_image(path) except utils.errors.LoadImageError as e: logger.warning('[%s] %s: %s' % (path, type(e).__name__, e) ) continue image = utils.image.resize_image(image, image_height, image_width, channels = image_channels, resize_mode = resize_mode, ) if compute_mean: image_sum += image if backend == 'lmdb': datum = _array_to_datum(image, label, encoding) write_queue.put(datum) else: write_queue.put((image, label)) images_added += 1 summary_queue.put((images_added, image_sum)) def _initial_image_sum(width, height, channels): """ Returns an array of zeros that will be used to store the accumulated sum of images """ if channels == 1: return np.zeros((height, width), np.float64) else: return np.zeros((height, width, channels), np.float64) def _array_to_datum(image, label, encoding): """ Create a caffe Datum from a numpy.ndarray """ if not encoding: # Transform to caffe's format requirements if image.ndim == 3: # Transpose to (channels, height, width) image = image.transpose((2,0,1)) if image.shape[0] == 3: # channel swap # XXX see issue #59 image = image[[2,1,0],...] 
elif image.ndim == 2: # Add a channels axis image = image[np.newaxis,:,:] else: raise Exception('Image has unrecognized shape: "%s"' % image.shape) datum = caffe.io.array_to_datum(image, label) else: datum = caffe_pb2.Datum() if image.ndim == 3: datum.channels = image.shape[2] else: datum.channels = 1 datum.height = image.shape[0] datum.width = image.shape[1] datum.label = label s = StringIO() if encoding == 'png': PIL.Image.fromarray(image).save(s, format='PNG') elif encoding == 'jpg': PIL.Image.fromarray(image).save(s, format='JPEG', quality=90) else: raise ValueError('Invalid encoding type') datum.data = s.getvalue() datum.encoded = True return datum def _write_batch_lmdb(db, batch, image_count): """ Write a batch to an LMDB database """ try: with db.begin(write=True) as lmdb_txn: for i, datum in enumerate(batch): key = '%08d_%d' % (image_count + i, datum.label) lmdb_txn.put(key, datum.SerializeToString()) except lmdb.MapFullError: # double the map_size curr_limit = db.info()['map_size'] new_limit = curr_limit*2 try: db.set_mapsize(new_limit) # double it except AttributeError as e: version = tuple(int(x) for x in lmdb.__version__.split('.')) if version < (0,87): raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__) else: raise e # try again _write_batch_lmdb(db, batch, image_count) def _save_means(image_sum, image_count, mean_files): """ Save mean[s] to file """ mean = np.around(image_sum / image_count).astype(np.uint8) for mean_file in mean_files: if mean_file.lower().endswith('.npy'): np.save(mean_file, mean) elif mean_file.lower().endswith('.binaryproto'): data = mean # Transform to caffe's format requirements if data.ndim == 3: # Transpose to (channels, height, width) data = data.transpose((2,0,1)) if data.shape[0] == 3: # channel swap # XXX see issue #59 data = data[[2,1,0],...] 
elif mean.ndim == 2: # Add a channels axis data = data[np.newaxis,:,:] blob = caffe_pb2.BlobProto() blob.num = 1 blob.channels, blob.height, blob.width = data.shape blob.data.extend(data.astype(float).flat) with open(mean_file, 'wb') as outfile: outfile.write(blob.SerializeToString()) elif mean_file.lower().endswith(('.jpg', '.jpeg', '.png')): image = PIL.Image.fromarray(mean) image.save(mean_file) else: logger.warning('Unrecognized file extension for mean file: "%s"' % mean_file) continue logger.info('Mean saved at "%s"' % mean_file) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Create-Db tool - DIGITS') ### Positional arguments parser.add_argument('input_file', help='An input file of labeled images') parser.add_argument('output_dir', help='Path to the output database') parser.add_argument('width', type=int, help='width of resized images' ) parser.add_argument('height', type=int, help='height of resized images' ) ### Optional arguments parser.add_argument('-c', '--channels', type=int, default=3, help='channels of resized images (1 for grayscale, 3 for color [default])' ) parser.add_argument('-r', '--resize_mode', help='resize mode for images (must be "crop", "squash" [default], "fill" or "half_crop")' ) parser.add_argument('-m', '--mean_file', action='append', help="location to output the image mean (doesn't save mean if not specified)") parser.add_argument('-f', '--image_folder', help='folder containing the images (if the paths in input_file are not absolute)') parser.add_argument('-s', '--shuffle', action='store_true', help='Shuffle images before saving' ) parser.add_argument('-e', '--encoding', help = 'Image encoding format (jpg/png)' ) parser.add_argument('-C', '--compression', help = 'Database compression format (gzip)' ) parser.add_argument('-b', '--backend', default='lmdb', help = 'The database backend - lmdb[default] or hdf5') parser.add_argument('--lmdb_map_size', type=int, help = 'The initial map size for LMDB (in MB)') 
parser.add_argument('--hdf5_dset_limit', type=int, default=2**31, help = 'The size limit for HDF5 datasets') args = vars(parser.parse_args()) if args['lmdb_map_size']: # convert from MB to B args['lmdb_map_size'] <<= 20 try: create_db(args['input_file'], args['output_dir'], args['width'], args['height'], args['channels'], args['backend'], resize_mode = args['resize_mode'], image_folder = args['image_folder'], shuffle = args['shuffle'], mean_files = args['mean_file'], encoding = args['encoding'], compression = args['compression'], lmdb_map_size = args['lmdb_map_size'], hdf5_dset_limit = args['hdf5_dset_limit'], ) except Exception as e: logger.error('%s: %s' % (type(e).__name__, e.message)) raise
############################################################################### # # The MIT License (MIT) # # Copyright (c) Tavendo GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# ############################################################################### import sys import os import pkg_resources import StringIO from sys import argv, executable from socket import AF_INET from twisted.internet import reactor from twisted.python import log from twisted.internet import reactor from twisted.internet.protocol import Factory from twisted.web.server import Site from twisted.web.static import File from autobahn.websocket import parseWsUrl from autobahn.twisted.websocket import WebSocketServerFactory, \ WebSocketServerProtocol from autobahn.util import Stopwatch # make sure we run a capable OS/reactor ## startupMsgs = [] if 'bsd' in sys.platform: from twisted.internet import kqreactor kqreactor.install() startupMsgs.append("Alrighty: you run a capable kqueue platform - good job!") elif sys.platform.startswith('linux'): from twisted.internet import epollreactor epollreactor.install() startupMsgs.append("Alrighty: you run a capable epoll platform - good job!") elif sys.platform.startswith('darwin'): from twisted.internet import kqreactor kqreactor.install() startupMsgs.append("Huh, you run OSX and have kqueue, but don't be disappointed when performance sucks;)") elif sys.platform == 'win32': raise Exception("Sorry dude, Twisted/Windows select/iocp reactors lack the necessary bits.") else: raise Exception("Hey man, what OS are you using?") startupMsgs.append("Using Twisted reactor class %s on Twisted %s" % (str(reactor.__class__), pkg_resources.require("Twisted")[0].version)) hasStatprof = False try: import statprof startupMsgs.append("statprof found! 
you may enable statistical profiling") hasStatprof = True except ImportError: startupMsgs.append("statprof not installed - no profiling available") class Stats: def __init__(self): # stats period self.period = 0 # currently connected client self.clients = 0 # total (running) stats self.tMsgs = 0 self.tOctets = 0 self.tHandshakes = 0 self.tOctetsWireIn = 0 self.tOctetsWireOut = 0 self.stopwatch = Stopwatch(start=False) # period stats self._advance() def _advance(self): self.period += 1 self.pMsgs = 0 self.pOctets = 0 self.pHandshakes = 0 self.pOctetsWireIn = 0 self.pOctetsWireOut = 0 self.stopwatch.resume() def trackHandshake(self): self.tHandshakes += 1 self.pHandshakes += 1 def trackMsg(self, length): self.tMsgs += 1 self.pMsgs += 1 self.tOctets += length self.pOctets += length def trackOctetsWireIn(self, count): self.tOctetsWireIn += count self.pOctetsWireIn += count def trackOctetsWireOut(self, count): self.tOctetsWireOut += count self.pOctetsWireOut += count def stats(self, advance=True): elapsed = self.stopwatch.stop() s = ("Period No. 
: %d\n" + "Period duration : %.3f s\n" + "Connected clients : %d\n" + "\n" + "Period\n" + " Handshakes : %20d # %20d #/s\n" + " Echo'ed msgs : %20d # %20d #/s\n" + " Echo'ed octets : %20d B %20d B/s\n" + " Wire octets in : %20d B %20d B/s\n" + " Wire octets out : %20d B %20d B/s\n" + "\n" + "Total\n" + " Handshakes : %20d #\n" + " Echo'ed msgs : %20d #\n" + " Echo'ed octets : %20d B\n" + " Wire octets in : %20d B\n" + " Wire octets out : %20d B\n" + "" ) % (self.period, round(elapsed, 3), self.clients, self.pHandshakes, round(float(self.pHandshakes) / elapsed), self.pMsgs, round(float(self.pMsgs) / elapsed), self.pOctets, round(float(self.pOctets) / elapsed), self.pOctetsWireIn, round(float(self.pOctetsWireIn) / elapsed), self.pOctetsWireOut, round(float(self.pOctetsWireOut) / elapsed), self.tHandshakes, self.tMsgs, self.tOctets, self.tOctetsWireIn, self.tOctetsWireOut, ) self._advance() return s class EchoServerProtocol(WebSocketServerProtocol): def onOpen(self): self.factory.stats.clients += 1 self.factory.stats.trackHandshake() def onMessage(self, msg, binary): self.sendMessage(msg, binary) self.factory.stats.trackMsg(len(msg)) def onClose(self, wasClean, code, reason): self.factory.stats.clients -= 1 def connectionLost(self, reason): WebSocketServerProtocol.connectionLost(self, reason) self.factory.stats.trackOctetsWireIn(self.trafficStats.preopenIncomingOctetsWireLevel + self.trafficStats.incomingOctetsWireLevel) self.factory.stats.trackOctetsWireOut(self.trafficStats.preopenOutgoingOctetsWireLevel + self.trafficStats.outgoingOctetsWireLevel) class EchoServerFactory(WebSocketServerFactory): protocol = EchoServerProtocol def __init__(self, wsuri, debug=False): WebSocketServerFactory.__init__(self, wsuri, debug=debug, debugCodePaths=debug) self.stats = Stats() # export PYPYLOG="jit-log-opt,jit-backend:pypy.log" # Run under "perf" and enable PyPy JIT logging ## # Notes: ## # - setting an env var (outside root) will NOT work (not propagated) # - setting in code 
also will NOT work ## # sudo PYPYLOG="jit-log-opt,jit-backend:pypy.log" perf record ~/pypy-20131102/bin/pypy server.py --workers 4 def master(options): """ Start of the master process. """ if not options.silence: print "Master started on PID %s" % os.getpid() # start embedded Web server if asked for (this only runs on master) ## if options.port: webdir = File(".") web = Site(webdir) web.log = lambda _: None # disable annoyingly verbose request logging reactor.listenTCP(options.port, web) # we just need some factory like thing .. it won't be used on master anyway # for actual socket accept ## factory = Factory() # create socket, bind and listen .. port = reactor.listenTCP(options.wsport, factory, backlog=options.backlog) # .. but immediately stop reading: we only want to accept on workers, not master port.stopReading() # fire off background workers ## for i in range(options.workers): args = [executable, "-u", __file__, "--fd", str(port.fileno()), "--cpuid", str(i)] # pass on cmd line args to worker .. args.extend(sys.argv[1:]) reactor.spawnProcess( None, executable, args, childFDs={0: 0, 1: 1, 2: 2, port.fileno(): port.fileno()}, env=os.environ) reactor.run() PROFILER_FREQ = 2000 def worker(options): """ Start background worker process. 
""" workerPid = os.getpid() if not options.noaffinity: p = psutil.Process(workerPid) print "affinity [before]", p.get_cpu_affinity() p.set_cpu_affinity([options.cpuid]) print "affinity [after]", p.get_cpu_affinity() factory = EchoServerFactory(options.wsuri, debug=options.debug) # The master already created the socket, just start listening and accepting ## reactor.adoptStreamPort(options.fd, AF_INET, factory) if not options.silence: print "Worker started on PID %s using factory %s and protocol %s" % (workerPid, factory, factory.protocol) # print "Worker %d PYPYLOG=%s" % (workerPid, os.environ.get('PYPYLOG', None)) if options.profile: statprof.reset(PROFILER_FREQ) statprof.start() if not options.silence: def stat(): if options.profile: statprof.stop() output = StringIO.StringIO() output.write("-" * 80 + "\n") output.write("Worker Statistics (PID %s)\n\n%s" % (workerPid, factory.stats.stats())) if options.profile: output.write("\n") # format = statprof.DisplayFormats.ByLine # format = statprof.DisplayFormats.ByMethod # statprof.display(output, format = format) statprof.display(output) output.write("-" * 80 + "\n\n") sys.stdout.write(output.getvalue()) if options.profile: statprof.reset(PROFILER_FREQ) statprof.start() reactor.callLater(options.interval, stat) reactor.callLater(options.interval, stat) if False: import cProfile print "RUNNING cProfile" cProfile.run('reactor.run()') else: reactor.run() # /usr/include/valgrind/valgrind.h # valgrind --tool=callgrind python server.py --wsuri ws://127.0.0.1:9000 # http://valgrind.org/docs/manual/cg-manual.html # http://valgrind.org/docs/manual/cl-manual.html # https://bitbucket.org/pypy/jitviewer # http://morepypy.blogspot.de/2011/08/visualization-of-jitted-code.html # http://people.cs.uct.ac.za/~tmullins/work/writeup.pdf # list(range(psutil.NUM_CPUS)) # p.get_cpu_affinity() # p.set_cpu_affinity([0]) # p.set_nice(psutil.HIGH_PRIORITY_CLASS) if __name__ == '__main__': import argparse import psutil DEFAULT_WORKERS = 
psutil.NUM_CPUS parser = argparse.ArgumentParser(description='Autobahn WebSocket Echo Multicore Server') parser.add_argument('--wsuri', dest='wsuri', type=str, default='ws://localhost:9000', help='The WebSocket URI the server is listening on, e.g. ws://localhost:9000.') parser.add_argument('--port', dest='port', type=int, default=8080, help='Port to listen on for embedded Web server. Set to 0 to disable.') parser.add_argument('--workers', dest='workers', type=int, default=DEFAULT_WORKERS, help='Number of workers to spawn - should fit the number of (physical) CPU cores.') parser.add_argument('--noaffinity', dest='noaffinity', action="store_true", default=False, help='Do not set worker/CPU affinity.') parser.add_argument('--backlog', dest='backlog', type=int, default=8192, help='TCP accept queue depth. You must tune your OS also as this is just advisory!') parser.add_argument('--silence', dest='silence', action="store_true", default=False, help='Silence log output.') parser.add_argument('--debug', dest='debug', action="store_true", default=False, help='Enable WebSocket debug output.') parser.add_argument('--interval', dest='interval', type=int, default=5, help='Worker stats update interval.') parser.add_argument('--profile', dest='profile', action="store_true", default=False, help='Enable profiling.') parser.add_argument('--fd', dest='fd', type=int, default=None, help='If given, this is a worker which will use provided FD and all other options are ignored.') parser.add_argument('--cpuid', dest='cpuid', type=int, default=None, help='If given, this is a worker which will use provided CPU core to set its affinity.') options = parser.parse_args() if options.profile and not hasStatprof: raise Exception("profiling requested, but statprof not installed") # parse WS URI into components and forward via options # FIXME: add TLS support isSecure, host, wsport, resource, path, params = parseWsUrl(options.wsuri) options.wsport = wsport # if not options.silence: # 
log.startLogging(sys.stdout) if options.fd is not None: # run worker worker(options) else: if not options.silence: for m in startupMsgs: print m # run master master(options)
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from builtins import str
import dill
import inspect
import os
import pickle
import subprocess
import sys
import types

from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, SkipMixin
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory

from textwrap import dedent


class PythonOperator(BaseOperator):
    """
    Executes a Python callable

    :param python_callable: A reference to an object that is callable
    :type python_callable: python callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked
        in your function
    :type op_kwargs: dict
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable
    :type op_args: list
    :param provide_context: if set to true, Airflow will pass a set of
        keyword arguments that can be used in your function. This set of
        kwargs correspond exactly to what you can use in your jinja
        templates. For this to work, you need to define `**kwargs` in your
        function header.
    :type provide_context: bool
    :param templates_dict: a dictionary where the values are templates that
        will get templated by the Airflow engine sometime between
        ``__init__`` and ``execute`` takes place and are made available
        in your callable's context after the template has been applied
    :type templates_dict: dict of str
    :param templates_exts: a list of file extensions to resolve while
        processing templated fields, for examples ``['.sql', '.hql']``
    :type templates_exts: list(str)
    """
    template_fields = ('templates_dict',)
    template_ext = tuple()
    ui_color = '#ffefeb'

    @apply_defaults
    def __init__(
            self,
            python_callable,
            op_args=None,
            op_kwargs=None,
            provide_context=False,
            templates_dict=None,
            templates_exts=None,
            *args, **kwargs):
        super(PythonOperator, self).__init__(*args, **kwargs)
        # fail fast at DAG-definition time rather than at execution time
        if not callable(python_callable):
            raise AirflowException('`python_callable` param must be callable')
        self.python_callable = python_callable
        self.op_args = op_args or []
        self.op_kwargs = op_kwargs or {}
        self.provide_context = provide_context
        self.templates_dict = templates_dict
        if templates_exts:
            self.template_ext = templates_exts

    def execute(self, context):
        if self.provide_context:
            # merge user kwargs into the Airflow context and pass the WHOLE
            # context as the callable's kwargs (op_kwargs is deliberately
            # replaced here, not updated)
            context.update(self.op_kwargs)
            context['templates_dict'] = self.templates_dict
            self.op_kwargs = context

        return_value = self.execute_callable()
        self.log.info("Done. Returned value was: %s", return_value)
        return return_value

    def execute_callable(self):
        # single extension point: subclasses override how the callable is run
        return self.python_callable(*self.op_args, **self.op_kwargs)


class BranchPythonOperator(PythonOperator, SkipMixin):
    """
    Allows a workflow to "branch" or follow a single path following the
    execution of this task.

    It derives the PythonOperator and expects a Python function that returns
    the task_id to follow. The task_id returned should point to a task
    directly downstream from {self}. All other "branches" or
    directly downstream tasks are marked with a state of ``skipped`` so that
    these paths can't move forward. The ``skipped`` states are propagated
    downstream to allow for the DAG state to fill up and the DAG run's state
    to be inferred.

    Note that using tasks with ``depends_on_past=True`` downstream from
    ``BranchPythonOperator`` is logically unsound as ``skipped`` status
    will invariably lead to block tasks that depend on their past successes.
    ``skipped`` states propagates where all directly upstream tasks are
    ``skipped``.
    """
    def execute(self, context):
        # the callable's return value names the task_id to follow
        branch = super(BranchPythonOperator, self).execute(context)
        self.log.info("Following branch %s", branch)
        self.log.info("Marking other directly downstream tasks as skipped")

        downstream_tasks = context['task'].downstream_list
        self.log.debug("Downstream task_ids %s", downstream_tasks)

        # everything directly downstream EXCEPT the chosen branch gets skipped
        skip_tasks = [t for t in downstream_tasks if t.task_id != branch]
        if downstream_tasks:
            self.skip(context['dag_run'], context['ti'].execution_date, skip_tasks)

        self.log.info("Done.")


class ShortCircuitOperator(PythonOperator, SkipMixin):
    """
    Allows a workflow to continue only if a condition is met. Otherwise, the
    workflow "short-circuits" and downstream tasks are skipped.

    The ShortCircuitOperator is derived from the PythonOperator. It evaluates a
    condition and short-circuits the workflow if the condition is False. Any
    downstream tasks are marked with a state of "skipped". If the condition is
    True, downstream tasks proceed as normal.

    The condition is determined by the result of `python_callable`.
    """
    def execute(self, context):
        condition = super(ShortCircuitOperator, self).execute(context)
        self.log.info("Condition result is %s", condition)

        if condition:
            self.log.info('Proceeding with downstream tasks...')
            return

        self.log.info('Skipping downstream tasks...')

        # unlike BranchPythonOperator, this skips ALL transitive descendants,
        # not only the direct downstream tasks
        downstream_tasks = context['task'].get_flat_relatives(upstream=False)
        self.log.debug("Downstream task_ids %s", downstream_tasks)

        if downstream_tasks:
            self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)

        self.log.info("Done.")


class PythonVirtualenvOperator(PythonOperator):
    """
    Allows one to run a function in a virtualenv that is
    created and destroyed automatically (with certain caveats).

    The function must be defined using def, and not be
    part of a class. All imports must happen inside the function
    and no variables outside of the scope may be referenced. A global scope
    variable named virtualenv_string_args will be available (populated by
    string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
    can use a return value.

    Note that if your virtualenv runs in a different Python major version than Airflow,
    you cannot use return values, op_args, or op_kwargs. You can use string_args though.

    :param python_callable: A python function with no references to outside variables,
        defined with def, which will be run in a virtualenv
    :type python_callable: function
    :param requirements: A list of requirements as specified in a pip install command
    :type requirements: list(str)
    :param python_version: The Python version to run the virtualenv with. Note that
        both 2 and 2.7 are acceptable forms.
    :type python_version: str
    :param use_dill: Whether to use dill to serialize
        the args and result (pickle is default). This allow more complex types
        but requires you to include dill in your requirements.
    :type use_dill: bool
    :param system_site_packages: Whether to include
        system_site_packages in your virtualenv.
        See virtualenv documentation for more information.
    :type system_site_packages: bool
    :param op_args: A list of positional arguments to pass to python_callable.
    :type op_kwargs: list
    :param op_kwargs: A dict of keyword arguments to pass to python_callable.
    :type op_kwargs: dict
    :param string_args: Strings that are present in the global var virtualenv_string_args,
        available to python_callable at runtime as a list(str). Note that args are split
        by newline.
    :type string_args: list(str)
    :param templates_dict: a dictionary where the values are templates that
        will get templated by the Airflow engine sometime between
        ``__init__`` and ``execute`` takes place and are made available
        in your callable's context after the template has been applied
    :type templates_dict: dict of str
    :param templates_exts: a list of file extensions to resolve while
        processing templated fields, for examples ``['.sql', '.hql']``
    :type templates_exts: list(str)
    """
    def __init__(self, python_callable, requirements=None,
                 python_version=None, use_dill=False,
                 system_site_packages=True, op_args=None, op_kwargs=None,
                 string_args=None, templates_dict=None, templates_exts=None,
                 *args, **kwargs):
        super(PythonVirtualenvOperator, self).__init__(
            python_callable=python_callable,
            op_args=op_args,
            op_kwargs=op_kwargs,
            templates_dict=templates_dict,
            templates_exts=templates_exts,
            provide_context=False,
            *args,
            **kwargs)
        self.requirements = requirements or []
        self.string_args = string_args or []
        self.python_version = python_version
        self.use_dill = use_dill
        self.system_site_packages = system_site_packages
        # check that dill is present if needed
        dill_in_requirements = map(lambda x: x.lower().startswith('dill'),
                                   self.requirements)
        if (not system_site_packages) and use_dill and not any(dill_in_requirements):
            raise AirflowException('If using dill, dill must be in the environment ' +
                                   'either via system_site_packages or requirements')
        # check that a function is passed, and that it is not a lambda
        # (lambdas share the name '<lambda>' and cannot be re-created from
        # source in the subprocess script)
        if (not isinstance(self.python_callable,
                           types.FunctionType) or (self.python_callable.__name__ ==
                                                   (lambda x: 0).__name__)):
            raise AirflowException('{} only supports functions for python_callable arg',
                                   self.__class__.__name__)
        # check that args are passed iff python major version matches
        # (pickle streams are not compatible across Python 2/3)
        if (python_version is not None and
           str(python_version)[0] != str(sys.version_info[0]) and
           self._pass_op_args()):
            raise AirflowException("Passing op_args or op_kwargs is not supported across "
                                   "different Python major versions "
                                   "for PythonVirtualenvOperator. Please use string_args.")

    def execute_callable(self):
        # build the venv, serialize args, generate and run a script inside
        # the venv, then deserialize the result -- all under one temp dir
        with TemporaryDirectory(prefix='venv') as tmp_dir:
            if self.templates_dict:
                self.op_kwargs['templates_dict'] = self.templates_dict
            # generate filenames
            input_filename = os.path.join(tmp_dir, 'script.in')
            output_filename = os.path.join(tmp_dir, 'script.out')
            string_args_filename = os.path.join(tmp_dir, 'string_args.txt')
            script_filename = os.path.join(tmp_dir, 'script.py')

            # set up virtualenv
            self._execute_in_subprocess(self._generate_virtualenv_cmd(tmp_dir))
            cmd = self._generate_pip_install_cmd(tmp_dir)
            if cmd:
                self._execute_in_subprocess(cmd)

            self._write_args(input_filename)
            self._write_script(script_filename)
            self._write_string_args(string_args_filename)

            # execute command in virtualenv
            self._execute_in_subprocess(
                self._generate_python_cmd(tmp_dir,
                                          script_filename,
                                          input_filename,
                                          output_filename,
                                          string_args_filename))
            return self._read_result(output_filename)

    def _pass_op_args(self):
        # we should only pass op_args if any are given to us
        return len(self.op_args) + len(self.op_kwargs) > 0

    def _execute_in_subprocess(self, cmd):
        try:
            self.log.info("Executing cmd\n{}".format(cmd))
            # list-form cmd (no shell); stderr folded into stdout for logging
            output = subprocess.check_output(cmd,
                                             stderr=subprocess.STDOUT,
                                             close_fds=True)
            if output:
                self.log.info("Got output\n{}".format(output))
        except subprocess.CalledProcessError as e:
            self.log.info("Got error output\n{}".format(e.output))
            raise

    def _write_string_args(self, filename):
        # writes string_args to a file, which are read line by line
        with open(filename, 'w') as f:
            f.write('\n'.join(map(str, self.string_args)))

    def _write_args(self, input_filename):
        # serialize args to file
        if self._pass_op_args():
            with open(input_filename, 'wb') as f:
                arg_dict = ({'args': self.op_args, 'kwargs': self.op_kwargs})
                if self.use_dill:
                    dill.dump(arg_dict, f)
                else:
                    pickle.dump(arg_dict, f)

    def _read_result(self, output_filename):
        # an empty output file means the callable returned nothing
        if os.stat(output_filename).st_size == 0:
            return None
        with open(output_filename, 'rb') as f:
            try:
                if self.use_dill:
                    return dill.load(f)
                else:
                    return pickle.load(f)
            except ValueError:
                self.log.error("Error deserializing result. Note that result deserialization "
                               "is not supported across major Python versions.")
                raise

    def _write_script(self, script_filename):
        with open(script_filename, 'w') as f:
            python_code = self._generate_python_code()
            self.log.debug('Writing code to file\n{}'.format(python_code))
            f.write(python_code)

    def _generate_virtualenv_cmd(self, tmp_dir):
        cmd = ['virtualenv', tmp_dir]
        if self.system_site_packages:
            cmd.append('--system-site-packages')
        if self.python_version is not None:
            cmd.append('--python=python{}'.format(self.python_version))
        return cmd

    def _generate_pip_install_cmd(self, tmp_dir):
        if len(self.requirements) == 0:
            return []
        else:
            # direct path alleviates need to activate
            cmd = ['{}/bin/pip'.format(tmp_dir), 'install']
            return cmd + self.requirements

    def _generate_python_cmd(self, tmp_dir, script_filename,
                             input_filename, output_filename, string_args_filename):
        # direct path alleviates need to activate
        return ['{}/bin/python'.format(tmp_dir), script_filename,
                input_filename, output_filename, string_args_filename]

    def _generate_python_code(self):
        if self.use_dill:
            pickling_library = 'dill'
        else:
            pickling_library = 'pickle'
        fn = self.python_callable
        # dont try to read pickle if we didnt pass anything
        if self._pass_op_args():
            load_args_line = 'with open(sys.argv[1], "rb") as f: arg_dict = {}.load(f)'\
                .format(pickling_library)
        else:
            load_args_line = 'arg_dict = {"args": [], "kwargs": {}}'

        # no indents
in original code so we can accept any type of indents in the original function # we deserialize args, call function, serialize result if necessary return dedent("""\ import {pickling_library} import sys {load_args_code} args = arg_dict["args"] kwargs = arg_dict["kwargs"] with open(sys.argv[3], 'r') as f: virtualenv_string_args = list(map(lambda x: x.strip(), list(f))) {python_callable_lines} res = {python_callable_name}(*args, **kwargs) with open(sys.argv[2], 'wb') as f: res is not None and {pickling_library}.dump(res, f) """).format( load_args_code=load_args_line, python_callable_lines=dedent(inspect.getsource(fn)), python_callable_name=fn.__name__, pickling_library=pickling_library) self.log.info("Done.")
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# pylint: disable=no-value-for-parameter, protected-access, arguments-differ
# pylint: disable=unused-argument

import unittest

from argus.backends.heat import heat_backend
from argus import exceptions
from heatclient import exc

try:
    import unittest.mock as mock
except ImportError:
    import mock


class FakeBaseHeatBackend(heat_backend.BaseHeatBackend):
    """Concrete BaseHeatBackend with the abstract client accessors stubbed."""

    # BUG FIX: this was misspelled ``__inti__`` and therefore dead code;
    # behavior is unchanged since it only delegates to the superclass.
    def __init__(self):
        super(FakeBaseHeatBackend, self).__init__()

    def get_remote_client(self):
        return mock.sentinel

    def remote_client(self):
        return mock.sentinel


class TestBaseHeatBackend(unittest.TestCase):
    """Unit tests for :class:`heat_backend.BaseHeatBackend`."""

    @mock.patch('argus.backends.heat.client.heat_client')
    @mock.patch('argus.backends.tempest.manager.APIManager')
    def setUp(self, mock_api_manager, mock_heat_client):
        self._base_heat_backend = FakeBaseHeatBackend()

    def test_build_template(self):
        instance_name = mock.sentinel
        key = mock.sentinel
        image_name = mock.sentinel
        flavor_name = mock.sentinel
        user_data = mock.sentinel
        floating_network_id = mock.sentinel
        private_net_id = mock.sentinel
        expected_template = {
            u'heat_template_version': u'2013-05-23',
            u'description': u'argus',
            u'resources': {
                u'server_floating_ip': {
                    u'type': u'OS::Neutron::FloatingIP',
                    u'properties': {
                        u'floating_network_id': floating_network_id,
                        u'port_id': {u'get_resource': u'server_port'}
                    }
                },
                instance_name: {
                    u'type': u'OS::Nova::Server',
                    u'properties': {
                        u'key_name': key,
                        u'image': image_name,
                        u'flavor': flavor_name,
                        u'user_data_format': 'RAW',
                        u'user_data': user_data,
                        u'networks': [
                            {u'port': {u'get_resource': u'server_port'}}
                        ]
                    }
                },
                u'server_port': {
                    u'type': u'OS::Neutron::Port',
                    u'properties': {
                        u'network_id': private_net_id,
                        u'security_groups': [
                            {u'get_resource': u'server_security_group'}
                        ]
                    }
                },
                u'server_security_group': {
                    u'type': u'OS::Neutron::SecurityGroup',
                    u'properties': {
                        u'rules': [
                            {u'remote_ip_prefix': u'0.0.0.0/0',
                             u'port_range_max': 5986,
                             u'port_range_min': 5986,
                             u'protocol': u'tcp'},
                            {u'remote_ip_prefix': u'0.0.0.0/0',
                             u'port_range_max': 5985,
                             u'port_range_min': 5985,
                             u'protocol': u'tcp'},
                            {u'remote_ip_prefix': u'0.0.0.0/0',
                             u'port_range_max': 3389,
                             u'port_range_min': 3389,
                             u'protocol': u'tcp'},
                            {u'remote_ip_prefix': u'0.0.0.0/0',
                             u'port_range_max': 22,
                             u'port_range_min': 22,
                             u'protocol': u'tcp'}
                        ],
                        u'description': u'Add security group rules for server',
                        u'name': u'security-group'}
                }
            }
        }
        result = self._base_heat_backend._build_template(
            instance_name, key, image_name, flavor_name, user_data,
            floating_network_id, private_net_id)
        self.assertEqual(result, expected_template)

    @mock.patch('argus.config.CONFIG.argus')
    def test_configure_network(self, mock_config):
        mock_config.dns_nameservers = mock.sentinel
        mock_credentials = mock.Mock()
        mock_credentials.subnet = {"id": mock.sentinel}
        (self._base_heat_backend._manager.subnets_client.
         update_subnet) = mock.Mock(return_value=mock.sentinel)
        self._base_heat_backend._configure_networking(mock_credentials)
        (self._base_heat_backend._manager.subnets_client.update_subnet.
         assert_called_once_with(
             mock_credentials.subnet["id"],
             dns_nameservers=mock_config.dns_nameservers))

    @mock.patch('argus.backends.base.CloudBackend.setup_instance')
    @mock.patch('argus.config.CONFIG.openstack')
    def test_setup_instance(self, mock_config, mock_super):
        mock_config.image_ref = mock.sentinel
        mock_config.flavor_ref = mock.sentinel

        mock_manager = mock.Mock()
        mock_manager.image_client = mock.Mock()
        mock_manager.image_client.get_image_meta.return_value = {
            "name": mock.sentinel
        }
        mock_manager.flavors_client = mock.Mock()
        mock_manager.flavors_client.show_flavor.return_value = {
            "flavor": {
                "name": mock.sentinel
            }
        }
        mock_manager.create_keypair = mock.Mock()
        mock_manager.create_keypair.return_value = mock.sentinel

        mock_credentials = mock.Mock()
        mock_credentials.router = {
            'external_gateway_info': {
                'network_id': mock.sentinel
            }
        }
        mock_credentials.network = {
            "id": mock.sentinel
        }

        self._base_heat_backend._build_template = mock.Mock()
        self._base_heat_backend._build_template.return_value = mock.sentinel
        self._base_heat_backend._heat_client.stacks.create = mock.Mock()
        mock_manager.primary_credentials.return_value = mock_credentials
        self._base_heat_backend._configure_networking = mock.Mock()
        self._base_heat_backend._manager = mock_manager

        self._base_heat_backend.setup_instance()

        (mock_manager.image_client.get_image_meta.
         assert_called_once_with(mock_config.image_ref))
        (mock_manager.flavors_client.show_flavor.assert_called_once_with(
            mock_config.flavor_ref))
        (mock_manager.create_keypair.assert_called_once_with(
            name=self._base_heat_backend.__class__.__name__))
        (mock_manager.primary_credentials.assert_called_once_with())
        (self._base_heat_backend._configure_networking.
         assert_called_once_with(mock_credentials))
        self._base_heat_backend._build_template.assert_called_once_with(
            self._base_heat_backend._name,
            self._base_heat_backend._keypair.name,
            mock_config.image_ref, mock_config.flavor_ref,
            self._base_heat_backend.userdata,
            mock.sentinel, mock.sentinel)
        fields = {
            'stack_name': self._base_heat_backend._name,
            'disable_rollback': True,
            'parameters': {},
            'template': self._base_heat_backend._build_template.return_value,
            'files': {},
            'environment': {},
        }
        (self._base_heat_backend._heat_client.stacks.create.
         assert_called_once_with(**fields))

    def _test_cleanup(self, reduce_=True, fails=False):
        # Shared driver for the cleanup scenarios below.
        if self._base_heat_backend._keypair:
            self._base_heat_backend._keypair = mock.Mock()
            self._base_heat_backend._keypair.destroy = mock.Mock()
        self._base_heat_backend._heat_client.stacks.list = mock.Mock()
        if reduce_ is False:
            (self._base_heat_backend._heat_client.stacks.list.
             return_value) = []
        else:
            (self._base_heat_backend._heat_client.stacks.list.
             return_value) = [1]
        self._base_heat_backend._delete_floating_ip = mock.Mock()
        self._base_heat_backend._heat_client.stacks.delete = mock.Mock()
        if fails:
            (self._base_heat_backend._heat_client.stacks.delete.
             side_effect) = Exception
        self._base_heat_backend._wait_stacks = mock.Mock()
        self._base_heat_backend._manager.cleanup_credentials = mock.Mock()

        if fails:
            with self.assertRaises(Exception):
                result = self._base_heat_backend.cleanup()
        else:
            result = self._base_heat_backend.cleanup()

        if self._base_heat_backend._keypair:
            (self._base_heat_backend._keypair.destroy.
             assert_called_once_with())
        if reduce_ is False:
            self.assertEqual(result, None)
        else:
            (self._base_heat_backend._delete_floating_ip.
             assert_called_once_with())
            (self._base_heat_backend._heat_client.stacks.delete.
             assert_called_once_with(
                 stack_id=self._base_heat_backend._name))
            count = 1
            if fails:
                count = 0
            (self.assertEqual(self._base_heat_backend.
                              _wait_stacks.call_count, count))
            (self._base_heat_backend._manager.cleanup_credentials.
             assert_called_once_with())

    def test_cleanup_no_reduce_destroy_key(self):
        self._base_heat_backend._keypair = mock.sentinel
        self._test_cleanup(reduce_=False)

    def test_cleanup_success(self):
        self._base_heat_backend._keypair = None
        self._test_cleanup()

    def test_cleanup_fails(self):
        self._base_heat_backend._keypair = None
        self._test_cleanup(fails=True)

    def test_get_stacks(self):
        list_ = [1, 2, 3, 4, 5]
        self._base_heat_backend._heat_client.stacks.list = mock.Mock()
        (self._base_heat_backend._heat_client.stacks.list.
         return_value) = list_
        result = self._base_heat_backend._get_stacks()
        self.assertEqual(result, len(list_))

    def test_wait_stacks_fails(self):
        raised_exception = exceptions.ArgusHeatTeardown(
            "All stacks failed to be deleted in time!")
        self._base_heat_backend._get_stacks = mock.Mock()
        self._base_heat_backend._get_stacks.return_value = 0
        with self.assertRaises(exceptions.ArgusHeatTeardown) as ex:
            self._base_heat_backend._wait_stacks()
        self.assertEqual(ex.exception.message, str(raised_exception))

    @mock.patch("time.sleep", return_value=None)
    def test_wait_stacks(self, _):
        self._base_heat_backend._get_stacks = mock.Mock()
        self._base_heat_backend._get_stacks.side_effect = [1, 1, 0]
        result = self._base_heat_backend._wait_stacks(retry_delay=1,
                                                      retry_count=5)
        self.assertEqual(result, None)
        self.assertEqual(self._base_heat_backend._get_stacks.call_count, 3)

    def test_delete_floating_ip_fails(self):
        (self._base_heat_backend._manager.floating_ips_client.
         delete_floating_ip) = mock.Mock()
        self._base_heat_backend._floating_ip_resource = {"id": mock.sentinel}
        self._base_heat_backend._search_resource_until_status = mock.Mock()
        (self._base_heat_backend._search_resource_until_status.
         side_effect) = exceptions.ArgusError
        self._base_heat_backend._delete_floating_ip()

    # BUG FIX: test name was misspelled 'resoutce'.
    def test_search_resource_until_status_http_error(self):
        self._base_heat_backend._name = "fake name"
        raised_exception = exceptions.ArgusError(
            'Stack not found: %s' % self._base_heat_backend._name)
        self._base_heat_backend._heat_client.resources.list = mock.Mock()
        (self._base_heat_backend._heat_client.resources.list.
         side_effect) = exc.HTTPNotFound()
        with self.assertRaises(exceptions.ArgusError) as ex:
            self._base_heat_backend._search_resource_until_status(
                mock.sentinel)
        self.assertEqual(ex.exception.message, str(raised_exception))

    # BUG FIX: test name was misspelled 'exceded'.
    def test_search_resource_until_status_limit_exceeded(self):
        resource_name = "fake resource"
        self._base_heat_backend._name = "fake name"
        raised_exception = exceptions.ArgusError(
            "No resource %s found with name %s"
            % (resource_name, self._base_heat_backend._name))
        self._base_heat_backend._name = "fake name"
        (self._base_heat_backend._heat_client.resources.list.
         side_effect) = raised_exception
        with self.assertRaises(exceptions.ArgusError) as ex:
            self._base_heat_backend._search_resource_until_status(
                resource_name, 0)
        self.assertEqual(ex.exception.message, str(raised_exception))

    def test_search_resource_until_status_success(self):
        resource_name = mock.sentinel
        status_completed = mock.sentinel
        mock_list = mock.Mock()
        mock_resource = mock.Mock()
        mock_resource.resource_type = resource_name
        mock_resource.resource_status = status_completed
        mock_resource.physical_resource_id = mock.sentinel
        mock_list.return_value = [mock_resource]
        self._base_heat_backend._heat_client.resources.list = mock_list
        result = self._base_heat_backend._search_resource_until_status(
            resource_name, status=status_completed)
        self.assertEqual(result, mock_resource.physical_resource_id)

    @mock.patch("time.sleep", return_value=None)
    def test_search_resource_until_status_timeout(self, _):
        resource_name = "fake_resource"
        status_completed = mock.sentinel
        self._base_heat_backend._name = "fake_name"
        exp = exceptions.ArgusError("No resource %s found with name %s"
                                    % (resource_name,
                                       self._base_heat_backend._name))
        mock_list = mock.Mock()
        mock_resource = mock.Mock()
        mock_resource.resource_type = resource_name
        mock_resource.resource_status = "fake status"
        mock_resource.physical_resource_id = mock.sentinel
        mock_list.return_value = [mock_resource]
        self._base_heat_backend._heat_client.resources.list = mock_list
        with self.assertRaises(exceptions.ArgusError) as ex:
            self._base_heat_backend._search_resource_until_status(
                resource_name, status=status_completed)
        self.assertEqual(ex.exception.message, str(exp))

    @mock.patch("time.sleep", return_value=None)
    def test_search_resource_until_status_(self, _):
        resource_name = mock.sentinel
        status_completed = mock.sentinel
        exp = exceptions.ArgusError("No resource %s found with name %s"
                                    % (resource_name,
                                       self._base_heat_backend._name))
        mock_list = mock.Mock()
        mock_resource = mock.Mock()
        mock_resource.resource_type = "fake_resource"
        mock_list.return_value = [mock_resource]
        self._base_heat_backend._heat_client.resources.list = mock_list
        with self.assertRaises(exceptions.ArgusError) as ex:
            self._base_heat_backend._search_resource_until_status(
                resource_name, status=status_completed)
        self.assertEqual(ex.exception.message, str(exp))

    def test_internal_id(self):
        def fake_function():
            return mock.sentinel
        mock_search_until_status = mock.Mock()
        mock_search_until_status.return_value = fake_function
        (self._base_heat_backend.
         _search_resource_until_status) = mock_search_until_status
        result = self._base_heat_backend._internal_id()
        self.assertEqual(result, fake_function())
        (self._base_heat_backend._search_resource_until_status.
         assert_called_once_with(heat_backend.OS_NOVA_RESOURCE))

    def test_internal_instance_id(self):
        self._base_heat_backend._internal_id = mock.sentinel
        result = self._base_heat_backend.internal_instance_id()
        self.assertEqual(result, self._base_heat_backend._internal_id)

    @mock.patch('argus.backends.heat.heat_backend.BaseHeatBackend.'
                '_search_resource_until_status')
    def test_floating_ip_resource(self, _):
        def fake_function():
            return mock.sentinel
        mock_floating = mock.Mock()
        mock_floating.show_floating_ip.return_value = {
            "floating_ip": fake_function
        }
        self._base_heat_backend._manager.floating_ips_client = mock_floating
        result = self._base_heat_backend._floating_ip_resource()
        self.assertEqual(result, fake_function())

    def test_floating_ip(self):
        self._base_heat_backend._floating_ip_resource = {"ip": mock.sentinel}
        result = self._base_heat_backend.floating_ip()
        self.assertEqual(
            result,
            self._base_heat_backend._floating_ip_resource['ip'])

    @mock.patch('argus.backends.tempest.manager.OUTPUT_SIZE')
    def test_instance_output(self, mock_output_size):
        self._base_heat_backend._manager = mock.Mock()
        (self._base_heat_backend._manager.instance_output.
         return_value) = mock.sentinel
        self._base_heat_backend.internal_instance_id = mock.Mock()
        result = self._base_heat_backend.instance_output()
        self.assertEqual(
            result,
            self._base_heat_backend._manager.instance_output.return_value)

    def test_reboot_instance(self):
        self._base_heat_backend._manager = mock.Mock()
        (self._base_heat_backend._manager.reboot_instance.
         return_value) = mock.sentinel
        self._base_heat_backend.internal_instance_id = mock.Mock()
        result = self._base_heat_backend.reboot_instance()
        self.assertEqual(
            result,
            self._base_heat_backend._manager.reboot_instance.return_value)
        self._base_heat_backend.internal_instance_id.assert_called_once_with()

    def test_instance_password(self):
        self._base_heat_backend.internal_instance_id = mock.Mock()
        self._base_heat_backend._keypair = mock.sentinel
        self._base_heat_backend._manager = mock.Mock()
        (self._base_heat_backend._manager.instance_password.
         return_value) = mock.sentinel
        result = self._base_heat_backend.instance_password()
        self.assertEqual(
            result,
            self._base_heat_backend._manager.instance_password.return_value)
        (self._base_heat_backend._manager.instance_password.
         assert_called_once_with(
             self._base_heat_backend.internal_instance_id(),
             self._base_heat_backend._keypair))

    def test_private_key(self):
        self._base_heat_backend._keypair = mock.Mock()
        self._base_heat_backend._keypair.private_key = mock.sentinel
        result = self._base_heat_backend.private_key()
        self.assertEqual(result,
                         self._base_heat_backend._keypair.private_key)

    def test_public_key(self):
        self._base_heat_backend._keypair = mock.Mock()
        self._base_heat_backend._keypair.public_key = mock.sentinel
        result = self._base_heat_backend.public_key()
        self.assertEqual(result,
                         self._base_heat_backend._keypair.public_key)

    def test_instance_server(self):
        self._base_heat_backend._manager = mock.Mock()
        (self._base_heat_backend._manager.instance_server.
         return_value) = mock.sentinel
        self._base_heat_backend.internal_instance_id = mock.Mock()
        result = self._base_heat_backend.instance_server()
        self.assertEqual(
            result,
            self._base_heat_backend._manager.instance_server.return_value)
        self._base_heat_backend.internal_instance_id.assert_called_once_with()

    @mock.patch('argus.config.CONFIG.openstack')
    def test_get_image_by_ref(self, mock_config):
        self._base_heat_backend._manager.compute_images_client = mock.Mock()
        (self._base_heat_backend._manager.compute_images_client.
         show_image.return_value) = mock.sentinel
        mock_config.image_ref = mock.sentinel
        result = self._base_heat_backend.get_image_by_ref()
        self.assertEqual(
            result,
            self._base_heat_backend._manager.compute_images_client.
            show_image.return_value)
        (self._base_heat_backend._manager.compute_images_client.show_image.
         assert_called_once_with(mock_config.image_ref))

    def test_get_mtu(self):
        self._base_heat_backend._manager = mock.Mock()
        self._base_heat_backend._manager.get_mtu.return_value = mock.sentinel
        result = self._base_heat_backend.get_mtu()
        self.assertEqual(
            result,
            self._base_heat_backend._manager.get_mtu.return_value)
def selfkeymaker(key): return key def gen_keymaker(prefix, suffix, keymaker): """ :param prefix: :type prefix: :class:`str` :param suffix: :type suffix: :class:`str` :param keymaker: :type keymaker: :func: :return: :rtype: :func: """ if keymaker: return keymaker if prefix or suffix: return lambda key: prefix + key + suffix return selfkeymaker class WrapObject(object): def __init__(self, redis): self._redis = redis self._result = None class KeyValueObject(WrapObject): def __init__(self, redis, prefix='', suffix='', keymaker=None): super(KeyValueObject, self).__init__(redis) self._keymaker = gen_keymaker(prefix, suffix, keymaker) def keys(self): pattern = self._keymaker('*') self._result = self._redis.keys(pattern) return self._result def __iter__(self): return iter(self.keys()) def __getitem__(self, item): key = self._keymaker(item) self._result = self._redis.get(key) return self._result def __setitem__(self, item, value): key = self._keymaker(item) self._result = self._redis.set(key, value) def __delitem__(self, item): key = self._keymaker(item) self._result = self._redis.delete(key) def __contains__(self, item): key = self._keymaker(item) self._result = self._redis.exists(key) return self._result def items(self): for key in self: yield key, class HashObject(WrapObject): def __init__(self, redis, datakey, prefix='', suffix='', keymaker=None): super(HashObject, self).__init__(redis) self._dkey = datakey self._keymaker = gen_keymaker(prefix, suffix, keymaker) def keys(self): self._result = self._redis.hkeys(self._dkey) return self._result def values(self): self._result = self._redis.hvals(self._dkey) return self._result def dict(self): self._result = self._redis.hgetall(self._dkey) return self._result def items(self): return self.dict().items() def __getitem__(self, item): key = self._keymaker(item) self._result = self._redis.hget(self._dkey, key) return self._result def __setitem__(self, item, value): key = self._keymaker(item) self._result = 
self._redis.hset(self._dkey, key, value) def __delitem__(self, item): key = self._keymaker(item) self._result = self._redis.hdel(self._dkey, key) def __contains__(self, item): key = self._keymaker(item) self._result = self._redis.hexists(self._dkey, key) def __iter__(self): return iter(self.keys()) def __len__(self): return self._redis.hlen(self._dkey) class ListObject(WrapObject): def __init__(self, redis, datakey): super(ListObject, self).__init__(redis) self._dkey = datakey def __getitem__(self, key): if isinstance(key, slice): self._result = self._redis.lrange(self._dkey, key.start, key.stop) return self._result else: self._result = self._redis.lindex(self._dkey, key) return self._result def __setitem__(self, item, value): self._result = self._redis.lset(self._dkey, item, value) def __len__(self): self._result = self._redis.llen(self._dkey) return self._result def __iter__(self): return iter(self[0:-1]) def append(self, item, *args): self._result = self._redis.rpush(self._dkey, item, *args) def lappend(self, item, *args): self._result = self._redis.lpush(self._dkey, item, *args) def pop(self): self._result = self._redis.lpop(self._dkey) return self._result def rpop(self): self._result = self._redis.rpop(self._dkey) return self._result class SetObject(WrapObject): def __init__(self, redis, datakey): super(SetObject, self).__init__(redis) self._dkey = datakey def add(self, member, *args): self._result = self._redis.sadd(self._dkey, member, *args) def __len__(self): self._result = self._redis.scard(self._dkey) return self._result def __sub__(self, other): self._result = self._redis.sdiff(self._dkey, other._dkey) return set(self._result) def intersection(self, other): self._result = self._redis.sinter(self._dkey, other._dkey) return set(self._result) def __contains__(self, item): self._result = self._redis.sismember(self._dkey, item) return self._result def pop(self): self._result = self._redis.spop(self._dkey) return self._result def remove(self, member, *args): 
self._result = self._redis.srem(self._dkey, member, *args) def union(self, other): self._result = self._redis.sunion(self._dkey, other._dkey) return set(self._result) class SortedSetValueObject(WrapObject): def __init__(self, redis, datakey): super(SortedSetValueObject, self).__init__(redis) self._dkey = datakey def __len__(self): self._result = self._redis.zcard(self._dkey) return self._result def __setitem__(self, key, value): self._result = self._redis.zadd(self._dkey, value, key) def __getitem__(self, key): self._result = self._redis.zscore(self._dkey, key) return self._result def __delitem__(self, key): self._result = self._redis.zrem(self._dkey, key) def rank(self, key): self._result = self._redis.zrank(self._dkey) class SortedSetRankObject(WrapObject): def __init__(self, redis, datakey): super(SortedSetRankObject, self).__init__(redis) self._dkey = datakey def __len__(self): self._result = self._redis.zcard(self._dkey) return self._result def __getitem__(self, key): if isinstance(key, slice): self._result = self._redis.zrange(key.start, key.stop) return self._result else: self._result = self._redis.zrange(key, key) return self._result[0] def add(self, member, score): self._result = self._redis.zadd(self._dkey, score, member) def remove(self, member): self._result = self._redis.zrem(self._dkey, member)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This file handles all flask-restful resources for /v3/users

import base64
import secrets
import uuid

import flask
import http.client
from oslo_serialization import jsonutils
from werkzeug import exceptions

from keystone.api._shared import json_home_relations
from keystone.application_credential import schema as app_cred_schema
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import rbac_enforcer
from keystone.common import utils
from keystone.common import validation
import keystone.conf
from keystone import exception as ks_exception
from keystone.i18n import _
from keystone.identity import schema
from keystone import notifications
from keystone.server import flask as ks_flask

CRED_TYPE_EC2 = 'ec2'
CONF = keystone.conf.CONF
ENFORCER = rbac_enforcer.RBACEnforcer
PROVIDERS = provider_api.ProviderAPIs

ACCESS_TOKEN_ID_PARAMETER_RELATION = (
    json_home_relations.os_oauth1_parameter_rel_func(
        parameter_name='access_token_id')
)


def _convert_v3_to_ec2_credential(credential):
    """Translate a v3 credential ref into the EC2 credential format."""
    # Prior to bug #1259584 fix, blob was stored unserialized
    # but it should be stored as a json string for compatibility
    # with the v3 credentials API.  Fall back to the old behavior
    # for backwards compatibility with existing DB contents.
    try:
        blob = jsonutils.loads(credential['blob'])
    except TypeError:
        blob = credential['blob']
    return {'user_id': credential.get('user_id'),
            'tenant_id': credential.get('project_id'),
            'access': blob.get('access'),
            'secret': blob.get('secret'),
            'trust_id': blob.get('trust_id')}


def _format_token_entity(entity):
    """Strip internal-only fields from an OAuth1 access-token entity and
    attach a 'roles' link."""
    formatted_entity = entity.copy()
    access_token_id = formatted_entity['id']
    user_id = formatted_entity.get('authorizing_user_id', '')
    if 'role_ids' in entity:
        formatted_entity.pop('role_ids')
    if 'access_secret' in entity:
        formatted_entity.pop('access_secret')

    url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
           '/roles' % {'user_id': user_id,
                       'access_token_id': access_token_id})

    formatted_entity.setdefault('links', {})
    formatted_entity['links']['roles'] = (ks_flask.base_url(url))
    return formatted_entity


def _check_unrestricted_application_credential(token):
    """Forbid managing application credentials with a restricted
    application-credential token."""
    if 'application_credential' in token.methods:
        if not token.application_credential['unrestricted']:
            action = _("Using method 'application_credential' is not "
                       "allowed for managing additional application "
                       "credentials.")
            raise ks_exception.ForbiddenAction(action=action)


def _build_user_target_enforcement():
    """Build the RBAC target dict from the user/group ids in the URL path."""
    target = {}
    try:
        target['user'] = PROVIDERS.identity_api.get_user(
            flask.request.view_args.get('user_id')
        )
        if flask.request.view_args.get('group_id'):
            target['group'] = PROVIDERS.identity_api.get_group(
                flask.request.view_args.get('group_id')
            )
    except ks_exception.NotFound:  # nosec
        # Defer existence in the event the user doesn't exist, we'll
        # check this later anyway.
        pass

    return target


def _build_enforcer_target_data_owner_and_user_id_match():
    """Build the RBAC target dict from the credential id in the URL path."""
    ref = {}
    if flask.request.view_args:
        credential_id = flask.request.view_args.get('credential_id')
        if credential_id is not None:
            hashed_id = utils.hash_access_key(credential_id)
            ref['credential'] = PROVIDERS.credential_api.get_credential(
                hashed_id)
    return ref


def _update_request_user_id_attribute():
    # This method handles a special case in policy enforcement. The
    # application credential API is underneath the user path (e.g.,
    # /v3/users/{user_id}/application_credentials/{application_credential_id}).
    # The RBAC enforcer thinks the user to evaluate for application credential
    # ownership comes from the path, but it should come from the actual
    # application credential reference. By ensuring we pull the user ID from
    # the application credential, we close a loop hole where users could
    # effectively bypass authorization to view or delete any application
    # credential in the system, assuming the attacker knows the application
    # credential ID of another user. So long as the attacker matches the user
    # ID in the request path to the user in the token of the request, they can
    # pass the `rule:owner` policy check. This method protects against that by
    # ensuring we use the application credential user ID and not something
    # determined from the client.
    try:
        app_cred = (
            PROVIDERS.application_credential_api.get_application_credential(
                flask.request.view_args.get('application_credential_id')
            )
        )
        flask.request.view_args['user_id'] = app_cred['user_id']
        # This target isn't really used in the default policy for application
        # credentials, but we return it since we're using this method as a
        # hook to update the flask request variables, which are used later in
        # the keystone RBAC enforcer to populate the policy_dict, which
        # ultimately turns into target attributes.
        return {'user_id': app_cred['user_id']}
    except ks_exception.NotFound:  # nosec
        # Defer existence in the event the application credential doesn't
        # exist, we'll check this later anyway.
        pass


def _format_role_entity(role_id):
    """Return a role ref with non-assignment fields stripped."""
    role = PROVIDERS.role_api.get_role(role_id)
    formatted_entity = role.copy()
    if 'description' in role:
        formatted_entity.pop('description')
    if 'enabled' in role:
        formatted_entity.pop('enabled')
    return formatted_entity


class UserResource(ks_flask.ResourceBase):
    collection_key = 'users'
    member_key = 'user'
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='identity_api', method='get_user')

    def get(self, user_id=None):
        """Get a user resource or list users.

        GET/HEAD /v3/users
        GET/HEAD /v3/users/{user_id}
        """
        if user_id is not None:
            return self._get_user(user_id)
        return self._list_users()

    def _get_user(self, user_id):
        """Get a user resource.

        GET/HEAD /v3/users/{user_id}
        """
        ENFORCER.enforce_call(
            action='identity:get_user',
            build_target=_build_user_target_enforcement
        )
        ref = PROVIDERS.identity_api.get_user(user_id)
        return self.wrap_member(ref)

    def _list_users(self):
        """List users.

        GET/HEAD /v3/users
        """
        filters = ('domain_id', 'enabled', 'idp_id', 'name', 'protocol_id',
                   'unique_id', 'password_expires_at')
        target = None
        if self.oslo_context.domain_id:
            target = {'domain_id': self.oslo_context.domain_id}
        hints = self.build_driver_hints(filters)
        ENFORCER.enforce_call(
            action='identity:list_users', filters=filters, target_attr=target
        )
        domain = self._get_domain_id_for_list_request()
        if domain is None and self.oslo_context.domain_id:
            domain = self.oslo_context.domain_id
        refs = PROVIDERS.identity_api.list_users(
            domain_scope=domain, hints=hints)

        # If the user making the request used a domain-scoped token, let's
        # make sure we filter out users that are not in that domain.
        # Otherwise, we'd be exposing users in other domains. This if
        # statement is needed in case _get_domain_id_for_list_request()
        # short-circuits due to configuration and protects against
        # information from other domains leaking to people who shouldn't
        # see it.
        if self.oslo_context.domain_id:
            domain_id = self.oslo_context.domain_id
            users = [user for user in refs if user['domain_id'] == domain_id]
        else:
            users = refs
        return self.wrap_collection(users, hints=hints)

    def post(self):
        """Create a user.

        POST /v3/users
        """
        user_data = self.request_body_json.get('user', {})
        target = {'user': user_data}
        ENFORCER.enforce_call(
            action='identity:create_user', target_attr=target
        )
        validation.lazy_validate(schema.user_create, user_data)
        user_data = self._normalize_dict(user_data)
        user_data = self._normalize_domain_id(user_data)
        ref = PROVIDERS.identity_api.create_user(
            user_data, initiator=self.audit_initiator)
        return self.wrap_member(ref), http.client.CREATED

    def patch(self, user_id):
        """Update a user.

        PATCH /v3/users/{user_id}
        """
        ENFORCER.enforce_call(
            action='identity:update_user',
            build_target=_build_user_target_enforcement
        )
        PROVIDERS.identity_api.get_user(user_id)
        user_data = self.request_body_json.get('user', {})
        validation.lazy_validate(schema.user_update, user_data)
        self._require_matching_id(user_data)
        ref = PROVIDERS.identity_api.update_user(
            user_id, user_data, initiator=self.audit_initiator)
        return self.wrap_member(ref)

    def delete(self, user_id):
        """Delete a user.

        DELETE /v3/users/{user_id}
        """
        ENFORCER.enforce_call(
            action='identity:delete_user',
            build_target=_build_user_target_enforcement
        )
        PROVIDERS.identity_api.delete_user(user_id)
        return None, http.client.NO_CONTENT


class UserChangePasswordResource(ks_flask.ResourceBase):
    @ks_flask.unenforced_api
    def get(self, user_id):
        # Special case, GET is not allowed.
        raise exceptions.MethodNotAllowed(valid_methods=['POST'])

    @ks_flask.unenforced_api
    def post(self, user_id):
        """Change a user's own password.

        POST /v3/users/{user_id}/password
        """
        user_data = self.request_body_json.get('user', {})
        validation.lazy_validate(schema.password_change, user_data)

        try:
            PROVIDERS.identity_api.change_password(
                user_id=user_id,
                original_password=user_data['original_password'],
                new_password=user_data['password'],
                initiator=self.audit_initiator)
        except AssertionError as e:
            # The identity backend signals a wrong original password with an
            # AssertionError; surface it as 401.
            raise ks_exception.Unauthorized(
                _('Error when changing user password: %s') % e
            )
        return None, http.client.NO_CONTENT


class UserProjectsResource(ks_flask.ResourceBase):
    collection_key = 'projects'
    member_key = 'project'
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='resource_api', method='get_project')

    def get(self, user_id):
        """List projects a user has role assignments on.

        GET/HEAD /v3/users/{user_id}/projects
        """
        filters = ('domain_id', 'enabled', 'name')
        ENFORCER.enforce_call(action='identity:list_user_projects',
                              filters=filters,
                              build_target=_build_user_target_enforcement)
        hints = self.build_driver_hints(filters)
        refs = PROVIDERS.assignment_api.list_projects_for_user(user_id)
        return self.wrap_collection(refs, hints=hints)


class UserGroupsResource(ks_flask.ResourceBase):
    collection_key = 'groups'
    member_key = 'group'
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='identity_api', method='get_group')

    def get(self, user_id):
        """Get groups for a user.

        GET/HEAD /v3/users/{user_id}/groups
        """
        filters = ('name',)
        hints = self.build_driver_hints(filters)
        ENFORCER.enforce_call(action='identity:list_groups_for_user',
                              build_target=_build_user_target_enforcement,
                              filters=filters)
        refs = PROVIDERS.identity_api.list_groups_for_user(user_id=user_id,
                                                           hints=hints)
        # Scope the listing to the caller's domain when a domain-scoped
        # token was used.
        if (self.oslo_context.domain_id):
            filtered_refs = []
            for ref in refs:
                if ref['domain_id'] == self.oslo_context.domain_id:
                    filtered_refs.append(ref)
            refs = filtered_refs
        return self.wrap_collection(refs, hints=hints)


class _UserOSEC2CredBaseResource(ks_flask.ResourceBase):
    collection_key = 'credentials'
    member_key = 'credential'

    @classmethod
    def _add_self_referential_link(cls, ref, collection_name=None):
        # NOTE(morgan): This should be refactored to have an EC2 Cred API
        # with a sane prefix instead of overloading the
        # "_add_self_referential_link" method. This was chosen as it more
        # closely mirrors the pre-flask code (for transition).
        path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s'

        url = ks_flask.base_url(path) % {
            'user_id': ref['user_id'],
            'credential_id': ref['access']}
        ref.setdefault('links', {})
        ref['links']['self'] = url


class UserOSEC2CredentialsResourceListCreate(_UserOSEC2CredBaseResource):
    def get(self, user_id):
        """List EC2 Credentials for user.

        GET/HEAD /v3/users/{user_id}/credentials/OS-EC2
        """
        ENFORCER.enforce_call(action='identity:ec2_list_credentials')
        PROVIDERS.identity_api.get_user(user_id)
        credential_refs = PROVIDERS.credential_api.list_credentials_for_user(
            user_id, type=CRED_TYPE_EC2)
        collection_refs = [
            _convert_v3_to_ec2_credential(cred)
            for cred in credential_refs
        ]
        return self.wrap_collection(collection_refs)

    def post(self, user_id):
        """Create EC2 Credential for user.
POST /v3/users/{user_id}/credentials/OS-EC2 """ target = {} target['credential'] = {'user_id': user_id} ENFORCER.enforce_call(action='identity:ec2_create_credential', target_attr=target) PROVIDERS.identity_api.get_user(user_id) tenant_id = self.request_body_json.get('tenant_id') PROVIDERS.resource_api.get_project(tenant_id) blob = dict( access=uuid.uuid4().hex, secret=uuid.uuid4().hex, trust_id=self.oslo_context.trust_id ) credential_id = utils.hash_access_key(blob['access']) cred_data = dict( user_id=user_id, project_id=tenant_id, blob=jsonutils.dumps(blob), id=credential_id, type=CRED_TYPE_EC2 ) PROVIDERS.credential_api.create_credential(credential_id, cred_data) ref = _convert_v3_to_ec2_credential(cred_data) return self.wrap_member(ref), http.client.CREATED class UserOSEC2CredentialsResourceGetDelete(_UserOSEC2CredBaseResource): @staticmethod def _get_cred_data(credential_id): cred = PROVIDERS.credential_api.get_credential(credential_id) if not cred or cred['type'] != CRED_TYPE_EC2: raise ks_exception.Unauthorized( message=_('EC2 access key not found.')) return _convert_v3_to_ec2_credential(cred) def get(self, user_id, credential_id): """Get a specific EC2 credential. GET/HEAD /users/{user_id}/credentials/OS-EC2/{credential_id} """ func = _build_enforcer_target_data_owner_and_user_id_match ENFORCER.enforce_call( action='identity:ec2_get_credential', build_target=func) PROVIDERS.identity_api.get_user(user_id) ec2_cred_id = utils.hash_access_key(credential_id) cred_data = self._get_cred_data(ec2_cred_id) return self.wrap_member(cred_data) def delete(self, user_id, credential_id): """Delete a specific EC2 credential. 
DELETE /users/{user_id}/credentials/OS-EC2/{credential_id} """ func = _build_enforcer_target_data_owner_and_user_id_match ENFORCER.enforce_call(action='identity:ec2_delete_credential', build_target=func) PROVIDERS.identity_api.get_user(user_id) ec2_cred_id = utils.hash_access_key(credential_id) self._get_cred_data(ec2_cred_id) PROVIDERS.credential_api.delete_credential(ec2_cred_id) return None, http.client.NO_CONTENT class _OAuth1ResourceBase(ks_flask.ResourceBase): collection_key = 'access_tokens' member_key = 'access_token' @classmethod def _add_self_referential_link(cls, ref, collection_name=None): # NOTE(morgan): This should be refactored to have an OAuth1 API with # a sane prefix instead of overloading the "_add_self_referential_link" # method. This was chosen as it more closely mirrors the pre-flask # code (for transition). ref.setdefault('links', {}) path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % { 'user_id': ref.get('authorizing_user_id', '') } ref['links']['self'] = ks_flask.base_url(path) + '/' + ref['id'] class OAuth1ListAccessTokensResource(_OAuth1ResourceBase): def get(self, user_id): """List OAuth1 Access Tokens for user. GET /v3/users/{user_id}/OS-OAUTH1/access_tokens """ ENFORCER.enforce_call(action='identity:list_access_tokens') if self.oslo_context.is_delegated_auth: raise ks_exception.Forbidden( _('Cannot list request tokens with a token ' 'issued via delegation.')) refs = PROVIDERS.oauth_api.list_access_tokens(user_id) formatted_refs = ([_format_token_entity(x) for x in refs]) return self.wrap_collection(formatted_refs) class OAuth1AccessTokenCRUDResource(_OAuth1ResourceBase): def get(self, user_id, access_token_id): """Get specific access token. 
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} """ ENFORCER.enforce_call(action='identity:get_access_token') access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise ks_exception.NotFound() access_token = _format_token_entity(access_token) return self.wrap_member(access_token) def delete(self, user_id, access_token_id): """Delete specific access token. DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} """ ENFORCER.enforce_call( action='identity:ec2_delete_credential', build_target=_build_enforcer_target_data_owner_and_user_id_match) access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) reason = ( 'Invalidating the token cache because an access token for ' 'consumer %(consumer_id)s has been deleted. Authorization for ' 'users with OAuth tokens will be recalculated and enforced ' 'accordingly the next time they authenticate or validate a ' 'token.' % {'consumer_id': access_token['consumer_id']} ) notifications.invalidate_token_cache_notification(reason) PROVIDERS.oauth_api.delete_access_token( user_id, access_token_id, initiator=self.audit_initiator) return None, http.client.NO_CONTENT class OAuth1AccessTokenRoleListResource(ks_flask.ResourceBase): collection_key = 'roles' member_key = 'role' def get(self, user_id, access_token_id): """List roles for a user access token. 
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/ {access_token_id}/roles """ ENFORCER.enforce_call(action='identity:list_access_token_roles') access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise ks_exception.NotFound() authed_role_ids = access_token['role_ids'] authed_role_ids = jsonutils.loads(authed_role_ids) refs = ([_format_role_entity(x) for x in authed_role_ids]) return self.wrap_collection(refs) class OAuth1AccessTokenRoleResource(ks_flask.ResourceBase): collection_key = 'roles' member_key = 'role' def get(self, user_id, access_token_id, role_id): """Get role for access token. GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/ {access_token_id}/roles/{role_id} """ ENFORCER.enforce_call(action='identity:get_access_token_role') access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise ks_exception.Unauthorized(_('User IDs do not match')) authed_role_ids = access_token['role_ids'] authed_role_ids = jsonutils.loads(authed_role_ids) for authed_role_id in authed_role_ids: if authed_role_id == role_id: role = _format_role_entity(role_id) return self.wrap_member(role) raise ks_exception.RoleNotFound(role_id=role_id) class UserAppCredListCreateResource(ks_flask.ResourceBase): collection_key = 'application_credentials' member_key = 'application_credential' _public_parameters = frozenset([ 'id', 'name', 'description', 'expires_at', 'project_id', 'roles', # secret is only exposed after create, it is not stored 'secret', 'links', 'unrestricted', 'access_rules' ]) @staticmethod def _generate_secret(): length = 64 secret = secrets.token_bytes(length) secret = base64.urlsafe_b64encode(secret) secret = secret.rstrip(b'=') secret = secret.decode('utf-8') return secret @staticmethod def _normalize_role_list(app_cred_roles): roles = [] for role in app_cred_roles: if role.get('id'): roles.append(role) else: 
roles.append(PROVIDERS.role_api.get_unique_role_by_name( role['name'])) return roles def _get_roles(self, app_cred_data, token): if app_cred_data.get('roles'): roles = self._normalize_role_list(app_cred_data['roles']) # NOTE(cmurphy): The user is not allowed to add a role that is not # in their token. This is to prevent trustees or application # credential users from escallating their privileges to include # additional roles that the trustor or application credential # creator has assigned on the project. token_roles = [r['id'] for r in token.roles] for role in roles: if role['id'] not in token_roles: detail = _('Cannot create an application credential with ' 'unassigned role') raise ks_exception.ApplicationCredentialValidationError( detail=detail) else: roles = token.roles return roles def get(self, user_id): """List application credentials for user. GET/HEAD /v3/users/{user_id}/application_credentials """ filters = ('name',) ENFORCER.enforce_call(action='identity:list_application_credentials', filters=filters) app_cred_api = PROVIDERS.application_credential_api hints = self.build_driver_hints(filters) refs = app_cred_api.list_application_credentials(user_id, hints=hints) return self.wrap_collection(refs, hints=hints) def post(self, user_id): """Create application credential. 
POST /v3/users/{user_id}/application_credentials """ ENFORCER.enforce_call(action='identity:create_application_credential') app_cred_data = self.request_body_json.get( 'application_credential', {}) validation.lazy_validate(app_cred_schema.application_credential_create, app_cred_data) token = self.auth_context['token'] _check_unrestricted_application_credential(token) if self.oslo_context.user_id != user_id: action = _('Cannot create an application credential for another ' 'user.') raise ks_exception.ForbiddenAction(action=action) project_id = self.oslo_context.project_id app_cred_data = self._assign_unique_id(app_cred_data) if not app_cred_data.get('secret'): app_cred_data['secret'] = self._generate_secret() app_cred_data['user_id'] = user_id app_cred_data['project_id'] = project_id app_cred_data['roles'] = self._get_roles(app_cred_data, token) if app_cred_data.get('expires_at'): app_cred_data['expires_at'] = utils.parse_expiration_date( app_cred_data['expires_at']) if app_cred_data.get('access_rules'): for access_rule in app_cred_data['access_rules']: # If user provides an access rule by ID, it will be looked up # by ID. If user provides an access rule that is identical to # an existing one, the ID generated here will be ignored and # the pre-existing access rule will be used. 
if 'id' not in access_rule: # Generate directly, rather than using _assign_unique_id, # so that there is no deep copy made access_rule['id'] = uuid.uuid4().hex app_cred_data = self._normalize_dict(app_cred_data) app_cred_api = PROVIDERS.application_credential_api try: ref = app_cred_api.create_application_credential( app_cred_data, initiator=self.audit_initiator) except ks_exception.RoleAssignmentNotFound as e: # Raise a Bad Request, not a Not Found, in accordance with the # API-SIG recommendations: # https://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications raise ks_exception.ApplicationCredentialValidationError( detail=str(e)) return self.wrap_member(ref), http.client.CREATED class UserAppCredGetDeleteResource(ks_flask.ResourceBase): collection_key = 'application_credentials' member_key = 'application_credential' def get(self, user_id, application_credential_id): """Get application credential resource. GET/HEAD /v3/users/{user_id}/application_credentials/ {application_credential_id} """ target = _update_request_user_id_attribute() ENFORCER.enforce_call( action='identity:get_application_credential', target_attr=target, ) ref = PROVIDERS.application_credential_api.get_application_credential( application_credential_id) return self.wrap_member(ref) def delete(self, user_id, application_credential_id): """Delete application credential resource. 
DELETE /v3/users/{user_id}/application_credentials/ {application_credential_id} """ target = _update_request_user_id_attribute() ENFORCER.enforce_call( action='identity:delete_application_credential', target_attr=target ) token = self.auth_context['token'] _check_unrestricted_application_credential(token) PROVIDERS.application_credential_api.delete_application_credential( application_credential_id, initiator=self.audit_initiator) return None, http.client.NO_CONTENT class UserAccessRuleListResource(ks_flask.ResourceBase): collection_key = 'access_rules' member_key = 'access_rule' def get(self, user_id): """List access rules for user. GET/HEAD /v3/users/{user_id}/access_rules """ filters = ('service', 'path', 'method',) ENFORCER.enforce_call(action='identity:list_access_rules', filters=filters, build_target=_build_user_target_enforcement) app_cred_api = PROVIDERS.application_credential_api hints = self.build_driver_hints(filters) refs = app_cred_api.list_access_rules_for_user(user_id, hints=hints) hints = self.build_driver_hints(filters) return self.wrap_collection(refs, hints=hints) class UserAccessRuleGetDeleteResource(ks_flask.ResourceBase): collection_key = 'access_rules' member_key = 'access_rule' def get(self, user_id, access_rule_id): """Get access rule resource. GET/HEAD /v3/users/{user_id}/access_rules/{access_rule_id} """ ENFORCER.enforce_call( action='identity:get_access_rule', build_target=_build_user_target_enforcement ) ref = PROVIDERS.application_credential_api.get_access_rule( access_rule_id) return self.wrap_member(ref) def delete(self, user_id, access_rule_id): """Delete access rule resource. 
DELETE /v3/users/{user_id}/access_rules/{access_rule_id} """ ENFORCER.enforce_call( action='identity:delete_access_rule', build_target=_build_user_target_enforcement ) PROVIDERS.application_credential_api.delete_access_rule( access_rule_id, initiator=self.audit_initiator) return None, http.client.NO_CONTENT class UserAPI(ks_flask.APIBase): _name = 'users' _import_name = __name__ resources = [UserResource] resource_mapping = [ ks_flask.construct_resource_map( resource=UserChangePasswordResource, url='/users/<string:user_id>/password', resource_kwargs={}, rel='user_change_password', path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserGroupsResource, url='/users/<string:user_id>/groups', resource_kwargs={}, rel='user_groups', path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserProjectsResource, url='/users/<string:user_id>/projects', resource_kwargs={}, rel='user_projects', path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserOSEC2CredentialsResourceListCreate, url='/users/<string:user_id>/credentials/OS-EC2', resource_kwargs={}, rel='user_credentials', resource_relation_func=( json_home_relations.os_ec2_resource_rel_func), path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserOSEC2CredentialsResourceGetDelete, url=('/users/<string:user_id>/credentials/OS-EC2/' '<string:credential_id>'), resource_kwargs={}, rel='user_credential', resource_relation_func=( json_home_relations.os_ec2_resource_rel_func), path_vars={ 'credential_id': json_home.build_v3_parameter_relation( 'credential_id'), 'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=OAuth1ListAccessTokensResource, url='/users/<string:user_id>/OS-OAUTH1/access_tokens', resource_kwargs={}, rel='user_access_tokens', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func), 
path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=OAuth1AccessTokenCRUDResource, url=('/users/<string:user_id>/OS-OAUTH1/' 'access_tokens/<string:access_token_id>'), resource_kwargs={}, rel='user_access_token', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=OAuth1AccessTokenRoleListResource, url=('/users/<string:user_id>/OS-OAUTH1/access_tokens/' '<string:access_token_id>/roles'), resource_kwargs={}, rel='user_access_token_roles', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func), path_vars={'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=OAuth1AccessTokenRoleResource, url=('/users/<string:user_id>/OS-OAUTH1/access_tokens/' '<string:access_token_id>/roles/<string:role_id>'), resource_kwargs={}, rel='user_access_token_role', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func), path_vars={'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserAppCredListCreateResource, url='/users/<string:user_id>/application_credentials', resource_kwargs={}, rel='application_credentials', path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserAppCredGetDeleteResource, url=('/users/<string:user_id>/application_credentials/' '<string:application_credential_id>'), resource_kwargs={}, rel='application_credential', path_vars={ 'user_id': json_home.Parameters.USER_ID, 'application_credential_id': json_home.Parameters.APPLICATION_CRED_ID} ), ks_flask.construct_resource_map( resource=UserAccessRuleListResource, url='/users/<string:user_id>/access_rules', 
resource_kwargs={}, rel='access_rules', path_vars={'user_id': json_home.Parameters.USER_ID} ), ks_flask.construct_resource_map( resource=UserAccessRuleGetDeleteResource, url=('/users/<string:user_id>/access_rules/' '<string:access_rule_id>'), resource_kwargs={}, rel='access_rule', path_vars={ 'user_id': json_home.Parameters.USER_ID, 'access_rule_id': json_home.Parameters.ACCESS_RULE_ID} ) ] APIs = (UserAPI,)
import math

import numpy

# Created on Wed May 31 15:27:40 2017 (merged from a Tue May 30 09:40:34 2017
# draft).
# @author: Kristen
#
# Locates oxygen and silicon atom positions in a 2-D silica lattice from a
# file of measured ring-center positions, via Delaunay triangulation of the
# centers (O atoms sit at triangle-edge midpoints) followed by geometric
# placement of Si atoms between O triplets.


def distance(position1, position2):
    """Return the Euclidean distance between two 3-D points.

    Each position is a sequence of three floats (x, y, z).
    """
    return math.sqrt(math.pow(position1[0] - position2[0], 2) +
                     math.pow(position1[1] - position2[1], 2) +
                     math.pow(position1[2] - position2[2], 2))


def dists(positions, dist):
    """Reduce a candidate group to a triplet that could flank one Si atom.

    Args:
        positions: list of 3-D points; positions[0] is the anchor oxygen and
            the rest are its neighbours within the inclusion radius.
        dist: maximum O-O separation (the variable inclusion radius).

    Returns:
        The (possibly pruned) list of positions if a valid triplet remains,
        otherwise [""] as the "no triplet" sentinel used by o_locator().
    """
    # Fewer than three atoms can never bracket a silicon.
    if len(positions) < 3:
        return [""]

    # Exactly three: valid only if the two neighbours are also close enough
    # to each other.
    if len(positions) == 3:
        if distance(positions[1], positions[2]) <= dist:
            return positions
        return [""]

    # More than three neighbours: score each non-anchor atom by how many of
    # the others it is too far from (+1 far, -1 near) and drop the worst one.
    # (Removed a stray debug `print(1)` that fired whenever len == 5.)
    numbers = [0] * len(positions)
    for i in range(1, len(positions) - 1):
        for j in range(1, len(positions) - i):
            if distance(positions[i], positions[i + j]) > dist:
                numbers[i] += 1
                numbers[i + j] += 1
            else:
                numbers[i] -= 1
                numbers[i + j] -= 1
    del positions[numbers.index(max(numbers))]

    # NOTE(review): only one atom is removed, so a group of 5+ may still hold
    # more than three entries here; kept as in the original algorithm.
    if distance(positions[1], positions[2]) <= dist:
        return positions
    return [""]


def find_four(opositions, far):
    """Find candidate four-membered rings among oxygen positions.

    Args:
        opositions: oxygen positions sorted by increasing x.
        far: inclusion radius for ring membership.

    Returns:
        A list of rings, each a list of four oxygen positions whose pairwise
        side lengths agree to within 0.03.
    """
    rings = [[]]
    remov = []
    for i in range(len(opositions)):
        rings.append([""])
        rings[i] = [opositions[i]]
        # Collect every oxygen to the right of i within the inclusion radius
        # (cheap x-gap test first, exact distance second).
        for j in range(1, len(opositions) - i):
            if abs(opositions[i][0] - opositions[i + j][0]) <= far:
                if distance(opositions[i], opositions[i + j]) <= far:
                    rings[i].append(opositions[i + j])

        rem = 0
        if len(rings[i]) < 4:
            rem = 1
        elif len(rings[i]) > 4:
            # Too many members: repeatedly drop the atom with the largest
            # summed distance to the later members until four remain.
            while len(rings[i]) != 4:
                distances = []
                for k in range(len(rings[i])):
                    tot_len = 0
                    for l in range(1, len(rings[i]) - k):
                        tot_len += distance(rings[i][k], rings[i][k + l])
                    distances.append(tot_len)
                del rings[i][distances.index(max(distances))]

        if len(rings[i]) == 4:
            # Keep the four side lengths (discard the two diagonals, i.e. the
            # two largest of the six pairwise distances) and require them to
            # be nearly equal.
            distances = []
            for n in range(len(rings[i]) - 1):
                for m in range(1, len(rings[i]) - n):
                    distances.append(distance(rings[i][n], rings[i][n + m]))
            for n in range(2):
                del distances[distances.index(max(distances))]
            for n in range(4):
                for m in range(1, len(distances) - n):
                    if abs(distances[n] - distances[n + m]) > .03:
                        rem = 1
        if rem == 1:
            remov.insert(0, i)

    # remov was built back-to-front, so deletion indices stay valid.
    for n in range(len(remov)):
        del rings[remov[n]]
    return rings


def triarea(p1, p2, p3):
    """Return the area of the triangle p1-p2-p3 (Heron's formula)."""
    a = distance(p1, p2)
    b = distance(p2, p3)
    c = distance(p1, p3)
    s = (a + b + c) / 2
    return math.sqrt(s * (s - a) * (s - b) * (s - c))


def ringarea(corners):
    """Return the area of a 2-D polygon via the shoelace formula.

    Only the first two coordinates of each corner are used.
    """
    n = len(corners)
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += corners[i][0] * corners[j][1]
        area -= corners[j][0] * corners[i][1]
    return float(abs(area) / 2.0)


def rem4(rings, si):
    """Return "n" if the silicon position lies inside any 4-membered ring.

    A point is inside the ring when the four triangles it forms with the
    ring's sides tile the ring exactly (areas sum to the ring area).
    Returns "y" otherwise.
    """
    for i in range(len(rings)):
        triangles = 0
        distances = []
        locations = []
        for n in range(len(rings[i]) - 1):
            for m in range(1, len(rings[i]) - n):
                distances.append(distance(rings[i][n], rings[i][n + m]))
                locations.append([n, n + m])
        # NOTE(review): appending a bare int here looks suspect -- later
        # iteration indexes locations[n][0] -- preserved from the original;
        # confirm intent before relying on this path.
        locations.append(len(rings[i]))
        # Drop the two diagonals, keeping only the four ring sides.
        for n in range(2):
            del locations[distances.index(max(distances))]
            del distances[distances.index(max(distances))]
        for n in range(len(locations)):
            triangles += triarea(rings[i][locations[n][0]],
                                 rings[i][locations[n][1]], si)
        if ringarea(rings[i]) == triangles:
            return "n"
    return "y"


def si_finder(opositions):
    """Return the Si position implied by a triplet of oxygen positions.

    Translates the triplet so opositions[0] is at the origin, rotates the
    plane of the three points into the xy plane, solves for the Si position
    at the characteristic Si-O distance, then undoes the rotations and the
    translation.
    """
    # Characteristic Si-O distance (nm).
    dist = 1.6 * math.pow(10, -1)

    # Translate so the first point is the origin (base point).
    trans = [[0, 0, 0],
             [opositions[1][0] - opositions[0][0],
              opositions[1][1] - opositions[0][1],
              opositions[1][2] - opositions[0][2]],
             [opositions[2][0] - opositions[0][0],
              opositions[2][1] - opositions[0][1],
              opositions[2][2] - opositions[0][2]]]

    # Normal vector to the plane of the three points (2x2 cofactor dets).
    v = numpy.matrix([numpy.linalg.det([[trans[1][1], trans[2][1]],
                                        [trans[1][2], trans[2][2]]]),
                      numpy.linalg.det([[trans[1][0], trans[2][0]],
                                        [trans[1][2], trans[2][2]]]),
                      numpy.linalg.det([[trans[1][0], trans[2][0]],
                                        [trans[1][1], trans[2][1]]])])

    # First rotation: about the x axis, to zero the normal's y component.
    theta = math.atan2(v.item(1), v.item(2))
    xmatr = numpy.matrix([[1, 0, 0],
                          [0, math.cos(theta), -math.sin(theta)],
                          [0, math.sin(theta), math.cos(theta)]])
    trans1 = numpy.matrix(trans)
    rot1 = numpy.matrix.dot(trans1, xmatr)
    v1 = numpy.matrix.dot(v, xmatr)

    # Second rotation: about the y axis, to align the normal with z.
    rho = math.atan2(v1.item(0), v1.item(2))
    ymatr = numpy.matrix([[math.cos(rho), 0, math.sin(rho)],
                          [0, 1, 0],
                          [-math.sin(rho), 0, math.cos(rho)]])
    rot2 = numpy.matrix.dot(rot1, ymatr)

    # Now in the xy plane; solve for the Si point equidistant (dist) from
    # the triplet, using the in-plane angles of the two non-origin points.
    alph = math.atan2(rot2.item(4), rot2.item(3))
    bet = math.atan2(rot2.item(7), rot2.item(6))
    r1 = math.sqrt(math.pow(rot2.item(3), 2) + math.pow(rot2.item(4), 2))
    r2 = math.sqrt(math.pow(rot2.item(6), 2) + math.pow(rot2.item(7), 2))
    x = r1 / 2
    y = r2 * (1 - math.cos(bet - alph)) / (2.0 * math.sin(bet - alph))
    z = math.sqrt(abs(math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2)))
    si_pos = numpy.matrix([x, y, z])

    # Rotate back within the plane to the original in-plane orientation.
    init = math.atan2(si_pos.item(1), si_pos.item(0))
    r = math.sqrt(math.pow(si_pos.item(0), 2) + math.pow(si_pos.item(1), 2))
    x = r * math.cos(init + alph)
    y = r * math.sin(init + alph)
    si_pos = numpy.matrix([x, y, z])

    # Undo the two rotations, then the translation.
    iymatr = numpy.linalg.inv(ymatr)
    si_pos = numpy.matrix.dot(si_pos, iymatr)
    ixmatr = numpy.linalg.inv(xmatr)
    si_pos = numpy.matrix.dot(si_pos, ixmatr)
    return [si_pos.item(0) + opositions[0][0],
            si_pos.item(1) + opositions[0][1],
            si_pos.item(2) + opositions[0][2]]


def o_locator(opositions):
    """Group oxygens into triplets that could each bracket one Si atom.

    Assumes opositions is sorted by increasing x. Returns a list of triplets
    (lists of three positions); groups that cannot form a triplet are
    discarded.
    """
    found = [[""]]
    for i in range(len(opositions)):
        found[i] = [opositions[i]]
        # Gather every oxygen to the right within the inclusion radius
        # (0.345 nm), using the x gap as a cheap pre-filter.
        for j in range(1, len(opositions) - i):
            if abs(opositions[i][0] - opositions[i + j][0]) <= \
                    3.45 * math.pow(10, -1):
                if distance(opositions[i], opositions[i + j]) <= \
                        3.45 * math.pow(10, -1):
                    found[i].append(opositions[i + j])
        found.append([""])
    # Remove the trailing empty list appended on the final iteration.
    del found[len(found) - 1]

    # Prune each group to a valid triplet (or the [""] sentinel).
    for n in range(len(found)):
        found[n] = dists(found[n], .345)

    # Collect sentinel indices back-to-front so deletions stay valid.
    remov = []
    for n in range(len(found)):
        if found[n] == [""]:
            remov.insert(0, n)
    for m in range(len(remov)):
        del found[remov[m]]
    return found


def locate_si(positions, dist):
    """Return all pairs of positions within dist of each other.

    Assumes positions are pre-sorted by x.
    """
    doubles = []
    for i in range(len(positions)):
        for j in range(1, len(positions) - i):
            if distance(positions[i], positions[i + j]) <= dist:
                doubles.append([positions[i], positions[i + j]])
    return doubles


def find_o(positions, dist):
    """Return the O position for each Si pair, at distance dist from both.

    Each element of positions is a pair [p_a, p_b] of 3-D points; the oxygen
    is placed midway between them, lifted out of the line to satisfy the
    characteristic distance, then rotated back into the pair's frame.
    """
    opositions = []
    for i in range(len(positions)):
        # Center the pair at the origin.
        pos2 = [positions[i][1][0] - positions[i][0][0],
                positions[i][1][1] - positions[i][0][1],
                positions[i][1][2] - positions[i][0][2]]
        # Rotate until both points are in the xy plane.
        theta = numpy.arctan2(pos2[1], pos2[0])
        phi = numpy.arctan2(pos2[2], pos2[0])
        newx = math.sqrt(math.pow(pos2[0], 2) + math.pow(pos2[2], 2))
        newy = newx * math.tan(theta)
        # Midpoint in-plane; z makes up the characteristic distance.
        x = newx / 2
        y = newy / 2
        if math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2) > 0:
            z = math.sqrt(math.pow(dist, 2) - math.pow(x, 2)
                          - math.pow(y, 2))
        else:
            z = 0
        # Current angle above the x-y plane; rotating back lands at phi+alph.
        r = math.sqrt(math.pow(x, 2) + math.pow(y, 2) + math.pow(z, 2))
        alph = math.asin(z / r)
        opos = [r * math.cos(theta) * math.cos(alph + phi),
                r * math.sin(theta) * math.cos(alph + phi),
                r * math.sin(alph + phi)]
        opositions.append([opos[0] + positions[i][0][0],
                           opos[1] + positions[i][0][1],
                           opos[2] + positions[i][0][2]])
    return opositions


def main():
    """Read center positions, derive O and Si positions, plot and save them."""
    # Input center positions (the prompt text doubles as the expected name).
    cpfile = input("Centers_to_test.txt")

    # Parse the single line of space-separated floats into a flat list.
    with open(cpfile) as f:
        content = f.readline()
    string = ""
    locations = []
    for i in range(len(content)):
        if content[i] == " ":
            locations.append(float(string))
            string = ""
        else:
            string += content[i]
    locations.append(float(string))

    # Re-group the flat list into [x, y, z] triples.
    positions = [[""]]
    for i in range(len(locations)):
        if i % 3 == 0:
            positions[int(i / 3)] = [locations[i]]
            positions.append("")
        else:
            positions[int(i / 3)].append(locations[i])
    del positions[len(positions) - 1]

    # Sort positions for the pair/triplet finder functions.
    positions = sorted(positions)

    # Delaunay-triangulate the centers in 2-D.
    xypts = []
    for i in range(len(positions)):
        xypts.append([positions[i][0], positions[i][1]])
    points = numpy.array(xypts)
    from scipy.spatial import Delaunay
    tri = Delaunay(points)
    print(len(tri.simplices))

    # Oxygen candidates: the midpoint of every triangle edge.
    o_locations = []
    for i in range(len(tri.simplices)):
        midptx1 = 0.50 * (points[tri.simplices][i][0][0]
                          + points[tri.simplices][i][1][0])
        midpty1 = 0.50 * (points[tri.simplices][i][0][1]
                          + points[tri.simplices][i][1][1])
        o_locations.append([midptx1, midpty1, 0])
        midptx2 = (points[tri.simplices][i][1][0]
                   + points[tri.simplices][i][2][0]) / 2.00
        midpty2 = (points[tri.simplices][i][1][1]
                   + points[tri.simplices][i][2][1]) / 2.00
        o_locations.append([midptx2, midpty2, 0])
        midptx3 = (points[tri.simplices][i][2][0]
                   + points[tri.simplices][i][0][0]) / 2.00
        midpty3 = (points[tri.simplices][i][2][1]
                   + points[tri.simplices][i][0][1]) / 2.00
        o_locations.append([midptx3, midpty3, 0])
    print(len(o_locations))

    # Shared edges produce duplicate midpoints: sort, then drop adjacent
    # duplicates back-to-front. (Removed the original bare `o_locations.sort`
    # statement -- the method was never called, so it was a no-op.)
    o_locations = sorted(o_locations)
    remove = []
    for i in range(len(o_locations) - 1):
        if o_locations[i] == o_locations[i + 1]:
            remove.append(i + 1)
    remove.sort(reverse=True)
    print(len(o_locations))
    for i in range(len(remove)):
        del o_locations[remove[i]]
    print(len(o_locations))

    # Write O positions to an out file (was `out.write("nn")`: a literal
    # "nn" instead of a newline, and the handle was never closed).
    with open("OfC Positions 120106_008 Python Output.txt", "w") as out:
        out.write(str(o_locations))
        out.write("\n")

    # Find Si positions from O triplets.
    positions = o_locations
    triples = o_locator(positions)
    print(triples)
    si_locations = []
    for j in range(len(triples)):
        si_locations.append(si_finder(triples[j]))
    # (Removed a dead `delete = []` loop that could never delete anything.)

    # Plot centers, oxygens, and silicons. Color strings were '# 2E9AFE'
    # (embedded space -> invalid matplotlib color); the first scatter plotted
    # the oxygen x/y under the 'Center Positions' label -- both fixed.
    xSipos = []
    ySipos = []
    for i in range(len(si_locations)):
        xSipos.append(si_locations[i][0])
        ySipos.append(si_locations[i][1])
    xOpos = []
    yOpos = []
    for i in range(len(o_locations)):
        xOpos.append(o_locations[i][0])
        yOpos.append(o_locations[i][1])
    import matplotlib.pyplot as plt
    plt.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
    plt.plot(points[:, 0], points[:, 1], 'o', color='#2E9AFE')
    plt.scatter(points[:, 0], points[:, 1], label='Center Positions',
                color='#2E9AFE')
    plt.scatter(xOpos, yOpos, label='Oxygen Positions', color='r')
    plt.scatter(xSipos, ySipos, label='Silicon Positions', color='g')

    # Write Si positions to an out file.
    with open("Si Positions Output 170404.txt", "w") as out:
        out.write(str(si_locations))
        out.write("\n")

    plt.xlabel('x (nm)')
    plt.ylabel('y (nm)')
    plt.title('Center Positions')
    plt.legend()
    plt.show()


if __name__ == "__main__":
    main()
import logging
import re

try:
    import dnf
except ImportError:
    dnf = None

from pyp2rpm import settings
from pyp2rpm import utils
from pyp2rpm.logger import LoggerWriter

logger = logging.getLogger(__name__)


class NameConvertor(object):
    """Converts a PyPI project name into an RPM package name following the
    (distro-specific) packaging guidelines."""

    def __init__(self, distro):
        # target distribution, e.g. 'fedora' or 'mageia'
        self.distro = distro
        # matches a leading "python-"/"pythonXY-" prefix
        self.reg_start = re.compile(r'^[Pp]ython(\d*|)-(.*)')
        # matches a trailing "-python"/"-pythonXY" suffix
        self.reg_end = re.compile(r'(.*)-(python)(\d*|)$')

    @staticmethod
    def rpm_versioned_name(name, version, default_number=False, epel=False):
        """Properly versions the name.
        For example:
        rpm_versioned_name('python-foo', '26') will return python26-foo
        rpm_versioned_name('pyfoo', '3') will return python3-pyfoo

        If version is same as settings.DEFAULT_PYTHON_VERSION, no change
        is done.

        Args:
            name: name to version
            version: version or None
            default_number: if True, the default python version is spelled
                out in the name instead of being dropped
            epel: if True, use the %{pythonX_pkgversion} macro for
                non-default versions and keep already-prefixed names as-is
        Returns:
            Versioned name or the original name if given version is None.
        """
        regexp = re.compile(r'^python(\d*|)-(.*)')
        # NOTE: `and` binds tighter than `or`, so this reads:
        # not version, OR (version is the default AND no explicit number wanted)
        if not version or version == settings.DEFAULT_PYTHON_VERSION and not default_number:
            found = regexp.search(name)
            # second check is to avoid renaming of python2-devel to python-devel
            if found and found.group(2) != 'devel':
                if not epel:
                    # reuse the match instead of re-searching (was searched twice)
                    return 'python-{0}'.format(found.group(2))
                return name

        versioned_name = name
        if version:
            if regexp.search(name):
                versioned_name = re.sub(r'^python(\d*|)-',
                                        'python{0}-'.format(version), name)
            else:
                versioned_name = 'python{0}-{1}'.format(version, name)
            if epel and version != settings.DEFAULT_PYTHON_VERSION:
                # NOTE(review): replaces the first occurrence of the version
                # digits anywhere in the name -- fragile if the project name
                # itself contains those digits; confirm against callers.
                versioned_name = versioned_name.replace(
                    '{0}'.format(version),
                    '%{{python{0}_pkgversion}}'.format(version))

        return versioned_name

    def rpm_name(self, name, python_version=settings.DEFAULT_PYTHON_VERSION):
        """Returns name of the package converted to (possibly) correct
        package name according to Packaging Guidelines.

        Args:
            name: name to convert
            python_version: python version for which to retrieve the name
                of the package
        Returns:
            Converted name of the package, that should be in line with
            Fedora Packaging Guidelines. If for_python is not None, the
            returned name is in form python%(version)s-%(name)s
        """
        logger.debug('Converting name: {0} to rpm name, version: {1}.'.format(
            name, python_version))
        rpmized_name = self.base_name(name)
        rpmized_name = 'python-{0}'.format(rpmized_name)
        if self.distro == 'mageia':
            # Mageia requires all-lowercase package names.
            rpmized_name = rpmized_name.lower()
        logger.debug('Rpmized name of {0}: {1}.'.format(name, rpmized_name))
        return NameConvertor.rpm_versioned_name(rpmized_name, python_version)

    def base_name(self, name):
        """Removes any python prefixes or suffixes from name if present,
        and replaces dots with dashes."""
        base_name = name.replace('.', "-")

        # remove python prefix if present
        found_prefix = self.reg_start.search(name)
        if found_prefix:
            base_name = found_prefix.group(2)

        # remove -pythonXY like suffix if present
        found_end = self.reg_end.search(name.lower())
        if found_end:
            base_name = found_end.group(1)

        return base_name


class NameVariants(object):
    """Class to generate variants of python package name and
    choose most likely correct one.
    """

    def __init__(self, name, version, py_init=True):
        self.name = name
        self.version = version
        self.variants = {}
        if py_init:
            self.names_init()
        self.variants_init()

    def find_match(self, name):
        """Stores `name` into self.variants under the first variant key
        whose canonical form it matches."""
        for variant in ['python_ver_name', 'pyver_name',
                        'name_python_ver', 'raw_name']:
            # iterates over all variants and store name to variants if matches
            if canonical_form(name) == canonical_form(getattr(self, variant)):
                self.variants[variant] = name

    def merge(self, other):
        """Merges object with other NameVariants object, not set values
        of self.variants are replaced by values from other object.
        """
        if not isinstance(other, NameVariants):
            # Fixed message: the original concatenated literals without a
            # separating space ("merge withother isinstance").
            raise TypeError("NameVariants instance can only be merged with "
                            "another instance of the same class")
        for key in self.variants:
            self.variants[key] = self.variants[key] or other.variants[key]
        return self

    def names_init(self):
        """Precomputes the candidate name shapes matched by find_match()."""
        self.python_ver_name = 'python{0}-{1}'.format(self.version, self.name)
        self.pyver_name = self.name if self.name.startswith('py') else 'py{0}{1}'.format(
            self.version, self.name)
        self.name_python_ver = '{0}-python{1}'.format(self.name, self.version)
        self.raw_name = self.name

    def variants_init(self):
        self.variants = {'python_ver_name': None,
                         'pyver_name': None,
                         'name_python_ver': None,
                         'raw_name': None}

    @property
    def best_matching(self):
        """Matched variant with the highest priority (or None)."""
        return (self.variants['python_ver_name'] or
                self.variants['pyver_name'] or
                self.variants['name_python_ver'] or
                self.variants['raw_name'])


class DandifiedNameConvertor(NameConvertor):
    """Name convertor based on DNF API query, checks if converted name
    of the package exists in Fedora repositories. If it doesn't, searches
    for the correct variant of the name.
    """

    def __init__(self, *args):
        super(DandifiedNameConvertor, self).__init__(*args)
        if dnf is None or self.distro != 'fedora':
            raise RuntimeError("DandifiedNameConvertor needs optional require dnf, and "
                               "can be used for Fedora distro only.")
        with dnf.Base() as base:
            RELEASEVER = dnf.rpm.detect_releasever(base.conf.installroot)
            base.conf.substitutions['releasever'] = RELEASEVER
            base.read_all_repos()
            base.fill_sack()
            self.query = base.sack.query()

    def rpm_name(self, name, python_version=None):
        """Checks if name converted using superclass rpm_name method
        matches the name of a package in the query. Searches for the
        correct name if it doesn't.
        """
        original_name = name
        converted = super(DandifiedNameConvertor, self).rpm_name(
            name, python_version)
        python_query = self.query.filter(name__substr=[
            'python', 'py', original_name, canonical_form(original_name)])
        if converted in [pkg.name for pkg in python_query]:
            logger.debug("Converted name exists")
            return converted

        logger.debug("Converted name not found, searches for correct form")
        not_versioned_name = NameVariants(self.base_name(original_name), '')
        versioned_name = NameVariants(self.base_name(original_name),
                                      python_version)
        # Explicit sentinel instead of the original `'nonpy_name' in locals()`
        # checks, which were fragile and obscured the control flow.
        nonpy_name = None
        if self.base_name(original_name).startswith("py"):
            nonpy_name = NameVariants(self.base_name(original_name)[2:],
                                      python_version)
        for pkg in python_query:
            versioned_name.find_match(pkg.name)
            not_versioned_name.find_match(pkg.name)
            if nonpy_name is not None:
                nonpy_name.find_match(pkg.name)
        if nonpy_name is not None:
            versioned_name = versioned_name.merge(nonpy_name)
        correct_form = versioned_name.merge(not_versioned_name).best_matching
        logger.debug("Most likely correct form of the name {0}.".format(
            correct_form))
        return correct_form or converted


def canonical_form(name):
    """Lowercase `name` and strip dashes/underscores for fuzzy comparison."""
    return name.lower().replace('-', '').replace('_', '')
# Market/orderbook helpers for a counterparty-style wallet backend.
# NOTE(review): Python 2 syntax (`except Exception, e`) throughout.
from datetime import datetime
import logging
import decimal
import base64
import json
import time

from lib import config, util

# All Decimal math in this module uses 8-digit precision (banker's rounding
# by default; BUY/SELL prices temporarily switch the rounding mode below).
decimal.setcontext(decimal.Context(prec=8, rounding=decimal.ROUND_HALF_EVEN))
D = decimal.Decimal

def calculate_price(base_quantity, quote_quantity, base_divisibility, quote_divisibility, order_type = None):
    """Return quote/base as an 8-decimal string; '0' on any error.

    Indivisible quantities are scaled by config.UNIT first.  BUY prices are
    rounded down, SELL prices rounded up; the default context is restored
    before returning either way.
    """
    if not base_divisibility:
        base_quantity *= config.UNIT
    if not quote_divisibility:
        quote_quantity *= config.UNIT
    try:
        if order_type == 'BUY':
            decimal.setcontext(decimal.Context(prec=8, rounding=decimal.ROUND_DOWN))
        elif order_type == 'SELL':
            decimal.setcontext(decimal.Context(prec=8, rounding=decimal.ROUND_UP))
        price = format(D(quote_quantity) / D(base_quantity), '.8f')
        # restore the module default rounding mode
        decimal.setcontext(decimal.Context(prec=8, rounding=decimal.ROUND_HALF_EVEN))
        return price
    except Exception, e:
        # division by zero, bad input, etc. -- report a zero price
        decimal.setcontext(decimal.Context(prec=8, rounding=decimal.ROUND_HALF_EVEN))
        return '0'

def get_pairs_with_orders(addresses=[], max_pairs=12):
    """Return up to `max_pairs` asset pairs that have open orders from the
    given source addresses, each with its open-order count."""
    pairs_with_orders = []
    sources = '''AND source IN ({})'''.format(','.join(['?' for e in range(0,len(addresses))]))
    sql = '''SELECT (MIN(give_asset, get_asset) || '/' || MAX(give_asset, get_asset)) AS pair, COUNT(*) AS order_count FROM orders WHERE give_asset != get_asset AND status = ? {} GROUP BY pair ORDER BY order_count DESC LIMIT ?'''.format(sources)
    bindings = ['open'] + addresses + [max_pairs]
    my_pairs = util.call_jsonrpc_api('sql', {'query': sql, 'bindings': bindings})['result']
    for my_pair in my_pairs:
        base_asset, quote_asset = util.assets_to_asset_pair(*tuple(my_pair['pair'].split("/")))
        top_pair = {
            'base_asset': base_asset,
            'quote_asset': quote_asset,
            'my_order_count': my_pair['order_count']
        }
        if my_pair['pair'] == 'VIA/XCH': # XCP/BTC always in first
            pairs_with_orders.insert(0, top_pair)
        else:
            pairs_with_orders.append(top_pair)
    return pairs_with_orders

def get_pairs(quote_asset='XCH', exclude_pairs=[], max_pairs=12, from_time=None):
    """Return completed-trade pairs quoted in `quote_asset`, ordered by
    total quote volume, optionally restricted to trades after `from_time`.

    NOTE(review): builds nested SQL by string concatenation with `?`
    placeholders throughout -- all values are bound, not interpolated.
    """
    bindings = []
    # Normalize each order-match row so the quote asset is always `quote_asset`.
    sql = '''SELECT (CASE WHEN forward_asset = ? THEN backward_asset ELSE forward_asset END) AS base_asset, (CASE WHEN backward_asset = ? THEN backward_asset ELSE forward_asset END) AS quote_asset, (CASE WHEN backward_asset = ? THEN (forward_asset || '/' || backward_asset) ELSE (backward_asset || '/' || forward_asset) END) AS pair, (CASE WHEN forward_asset = ? THEN backward_quantity ELSE forward_quantity END) AS bq, (CASE WHEN backward_asset = ? THEN backward_quantity ELSE forward_quantity END) AS qq '''
    if from_time:
        sql += ''', block_time '''
    sql += '''FROM order_matches '''
    bindings += [quote_asset, quote_asset, quote_asset, quote_asset, quote_asset]
    if from_time:
        sql += '''INNER JOIN blocks ON order_matches.block_index = blocks.block_index '''
    # Assets listed before `quote_asset` in config.QUOTE_ASSETS take priority
    # as the quote side, so exclude pairs they already claim.
    priority_quote_assets = []
    for priority_quote_asset in config.QUOTE_ASSETS:
        if priority_quote_asset != quote_asset:
            priority_quote_assets.append(priority_quote_asset)
        else:
            break
    if len(priority_quote_assets) > 0:
        asset_bindings = ','.join(['?' for e in range(0,len(priority_quote_assets))])
        sql += '''WHERE ((forward_asset = ? AND backward_asset NOT IN ({})) OR (forward_asset NOT IN ({}) AND backward_asset = ?)) '''.format(asset_bindings, asset_bindings)
        bindings += [quote_asset] + priority_quote_assets + priority_quote_assets + [quote_asset]
    else:
        sql += '''WHERE ((forward_asset = ?) OR (backward_asset = ?)) '''
        bindings += [quote_asset, quote_asset]
    if len(exclude_pairs) > 0:
        sql += '''AND pair NOT IN ({}) '''.format(','.join(['?' for e in range(0,len(exclude_pairs))]))
        bindings += exclude_pairs
    if from_time:
        sql += '''AND block_time > ? '''
        bindings += [from_time]
    sql += '''AND forward_asset != backward_asset AND status = ?'''
    bindings += ['completed', max_pairs]
    # Wrap the row query in an aggregation over each pair's volumes.
    sql = '''SELECT base_asset, quote_asset, pair, SUM(bq) AS base_quantity, SUM(qq) AS quote_quantity FROM ({}) GROUP BY pair ORDER BY quote_quantity DESC LIMIT ?'''.format(sql)
    return util.call_jsonrpc_api('sql', {'query': sql, 'bindings': bindings})['result']

def get_quotation_pairs(exclude_pairs=[], max_pairs=12, from_time=None, include_currencies=[]):
    """Collect pairs across the configured (or given) quote currencies,
    decrementing the remaining `max_pairs` budget per currency."""
    all_pairs = []
    currencies = include_currencies if len(include_currencies) > 0 else config.MARKET_LIST_QUOTE_ASSETS
    for currency in currencies:
        currency_pairs = get_pairs(quote_asset=currency, exclude_pairs=exclude_pairs, max_pairs=max_pairs, from_time=from_time)
        max_pairs = max_pairs - len(currency_pairs)
        for currency_pair in currency_pairs:
            if currency_pair['pair'] == 'XCP/BTC':
                # XCP/BTC always listed first
                all_pairs.insert(0, currency_pair)
            else:
                all_pairs.append(currency_pair)
    return all_pairs

@util.block_cache
def get_users_pairs(addresses=[], max_pairs=12, quote_assets=config.MARKET_LIST_QUOTE_ASSETS):
    """Return up to 12 pairs relevant to the given addresses (their open
    orders first, then top market pairs), each annotated with price data."""
    top_pairs = []
    all_assets = []
    exclude_pairs = []
    if len(addresses) > 0:
        # the user's own open-order pairs come first and are not repeated
        top_pairs += get_pairs_with_orders(addresses, max_pairs)
        for p in top_pairs:
            exclude_pairs += [p['base_asset'] + '/' + p['quote_asset']]
            all_assets += [p['base_asset'], p['quote_asset']]
    for currency in quote_assets:
        if len(top_pairs) < max_pairs:
            limit = max_pairs - len(top_pairs)
            currency_pairs = get_pairs(currency, exclude_pairs, limit)
            for currency_pair in currency_pairs:
                top_pair = {
                    'base_asset': currency_pair['base_asset'],
                    'quote_asset': currency_pair['quote_asset']
                }
                if currency_pair['pair'] == 'XCH/VIA': # XCP/BTC always in first
                    top_pairs.insert(0, top_pair)
                else:
                    top_pairs.append(top_pair)
                all_assets += [currency_pair['base_asset'], currency_pair['quote_asset']]
    # guarantee the flagship XCH/VIA pair is present and listed first
    if ('VIA' in quote_assets) and ('XCH/VIA' not in [p['base_asset'] + '/' + p['quote_asset'] for p in top_pairs]):
        top_pairs.insert(0, {
            'base_asset': 'XCH',
            'quote_asset': 'VIA'
        })
        all_assets += ['XCH', 'VIA']
    top_pairs = top_pairs[:12]
    all_assets = list(set(all_assets))
    supplies = get_assets_supply(all_assets)
    # annotate every pair with its current price movement
    for p in range(len(top_pairs)):
        price, trend, price24h, progression = get_price_movement(top_pairs[p]['base_asset'], top_pairs[p]['quote_asset'], supplies=supplies)
        top_pairs[p]['price'] = format(price, ".8f")
        top_pairs[p]['trend'] = trend
        top_pairs[p]['progression'] = format(progression, ".2f")
        top_pairs[p]['price_24h'] = format(price24h, ".8f")
    return top_pairs

def merge_same_price_orders(orders):
    """Collapse orders at an identical price into one order, summing their
    amounts and totals; input order book is sorted by price first."""
    if len(orders) > 1:
        merged_orders = []
        orders = sorted(orders, key=lambda x: D(x['price']))
        merged_orders.append(orders[0])
        for o in range(1, len(orders)):
            if D(orders[o]['price']) == D(merged_orders[-1]['price']):
                # same price level: accumulate into the previous entry
                merged_orders[-1]['amount'] += orders[o]['amount']
                merged_orders[-1]['total'] += orders[o]['total']
            else:
                merged_orders.append(orders[o])
        return merged_orders
    else:
        return orders

@util.block_cache
def get_market_orders(asset1, asset2, addresses=[], supplies=None, min_fee_provided=0.95, max_fee_required=0.95):
    """Build the open order book for a pair.

    With `addresses` given, returns the raw per-order list (with completion
    info); without, returns sell+buy books with same-price orders merged.
    VIA orders paying/demanding out-of-range BTC fees are excluded.
    """
    base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
    if not supplies:
        supplies = get_assets_supply([asset1, asset2])
    market_orders = []
    buy_orders = []
    sell_orders = []
    sql = '''SELECT orders.*, blocks.block_time FROM orders INNER JOIN blocks ON orders.block_index=blocks.block_index WHERE status = ? '''
    bindings = ['open']
    if len(addresses) > 0:
        sql += '''AND source IN ({}) '''.format(','.join(['?' for e in range(0,len(addresses))]))
        bindings += addresses
    sql += '''AND give_remaining > 0 AND give_asset IN (?, ?) AND get_asset IN (?, ?) ORDER BY tx_index DESC'''
    bindings += [asset1, asset2, asset1, asset2]
    orders = util.call_jsonrpc_api('sql', {'query': sql, 'bindings': bindings})['result']
    for order in orders:
        market_order = {}
        exclude = False
        if order['give_asset'] == 'VIA':
            # fee provided as a percentage of the given quantity
            try:
                fee_provided = order['fee_provided'] / (order['give_quantity'] / 100)
                market_order['fee_provided'] = format(D(order['fee_provided']) / (D(order['give_quantity']) / D(100)), '.2f')
            except Exception, e:
                fee_provided = min_fee_provided - 1 # exclude
            exclude = fee_provided < min_fee_provided
        elif order['get_asset'] == 'VIA':
            # fee required as a percentage of the received quantity
            try:
                fee_required = order['fee_required'] / (order['get_quantity'] / 100)
                market_order['fee_required'] = format(D(order['fee_required']) / (D(order['get_quantity']) / D(100)), '.2f')
            except Exception, e:
                fee_required = max_fee_required + 1 # exclude
            exclude = fee_required > max_fee_required
        if not exclude:
            if order['give_asset'] == base_asset:
                # giving the base asset => SELL order
                price = calculate_price(order['give_quantity'], order['get_quantity'], supplies[order['give_asset']][1], supplies[order['get_asset']][1], 'SELL')
                market_order['type'] = 'SELL'
                market_order['amount'] = order['give_remaining']
                market_order['total'] = D(order['give_remaining']) * D(price)
                # rescale when exactly one side is indivisible
                if not supplies[order['give_asset']][1] and supplies[order['get_asset']][1]:
                    market_order['total'] = int(market_order['total'] * config.UNIT)
                elif supplies[order['give_asset']][1] and not supplies[order['get_asset']][1]:
                    market_order['total'] = int(market_order['total'] / config.UNIT)
                else:
                    market_order['total'] = int(market_order['total'])
            else:
                # giving the quote asset => BUY order
                price = calculate_price(order['get_quantity'], order['give_quantity'], supplies[order['get_asset']][1], supplies[order['give_asset']][1], 'BUY')
                market_order['type'] = 'BUY'
                market_order['total'] = order['give_remaining']
                market_order['amount'] = D(order['give_remaining']) / D(price)
                if supplies[order['give_asset']][1] and not supplies[order['get_asset']][1]:
                    market_order['amount'] = int(market_order['amount'] / config.UNIT)
                elif not supplies[order['give_asset']][1] and supplies[order['get_asset']][1]:
                    market_order['amount'] = int(market_order['amount'] * config.UNIT)
                else:
                    market_order['amount'] = int(market_order['amount'])
            market_order['price'] = price
            if len(addresses) > 0:
                # per-user view: include completion percentage and tx details
                completed = format(((D(order['give_quantity']) - D(order['give_remaining'])) / D(order['give_quantity'])) * D(100), '.2f')
                market_order['completion'] = "{}%".format(completed)
                market_order['tx_index'] = order['tx_index']
                market_order['tx_hash'] = order['tx_hash']
                market_order['source'] = order['source']
                market_order['block_index'] = order['block_index']
                market_order['block_time'] = order['block_time']
                market_orders.append(market_order)
            else:
                if market_order['type'] == 'SELL':
                    sell_orders.append(market_order)
                else:
                    buy_orders.append(market_order)
    if len(addresses) == 0:
        market_orders = merge_same_price_orders(sell_orders) + merge_same_price_orders(buy_orders)
    return market_orders

@util.block_cache
def get_market_trades(asset1, asset2, addresses=[], limit=50, supplies=None):
    """Return the most recent (non-expired) trades for a pair, capped at
    100; a match involving both given addresses yields two trade rows."""
    limit = min(limit, 100)
    base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
    if not supplies:
        supplies = get_assets_supply([asset1, asset2])
    market_trades = []
    sources = ''
    bindings = ['expired']
    if len(addresses) > 0:
        placeholder = ','.join(['?' for e in range(0,len(addresses))])
        sources = '''AND (tx0_address IN ({}) OR tx1_address IN ({}))'''.format(placeholder, placeholder)
        bindings += addresses + addresses
    sql = '''SELECT order_matches.*, blocks.block_time FROM order_matches INNER JOIN blocks ON order_matches.block_index=blocks.block_index WHERE status != ? {} AND forward_asset IN (?, ?) AND backward_asset IN (?, ?) ORDER BY block_index DESC LIMIT ?'''.format(sources)
    bindings += [asset1, asset2, asset1, asset2, limit]
    order_matches = util.call_jsonrpc_api('sql', {'query': sql, 'bindings': bindings})['result']
    for order_match in order_matches:
        # a trade row from the perspective of tx0 (only for watched addresses)
        if order_match['tx0_address'] in addresses:
            trade = {}
            trade['match_id'] = order_match['id']
            trade['source'] = order_match['tx0_address']
            trade['countersource'] = order_match['tx1_address']
            trade['block_index'] = order_match['block_index']
            trade['block_time'] = order_match['block_time']
            trade['status'] = order_match['status']
            if order_match['forward_asset'] == base_asset:
                trade['type'] = 'SELL'
                trade['price'] = calculate_price(order_match['forward_quantity'], order_match['backward_quantity'], supplies[order_match['forward_asset']][1], supplies[order_match['backward_asset']][1], 'SELL')
                trade['amount'] = order_match['forward_quantity']
                trade['total'] = order_match['backward_quantity']
            else:
                trade['type'] = 'BUY'
                trade['price'] = calculate_price(order_match['backward_quantity'], order_match['forward_quantity'], supplies[order_match['backward_asset']][1], supplies[order_match['forward_asset']][1], 'BUY')
                trade['amount'] = order_match['backward_quantity']
                trade['total'] = order_match['forward_quantity']
            market_trades.append(trade)
        # a trade row from the perspective of tx1 (always, in market-wide view)
        if len(addresses)==0 or order_match['tx1_address'] in addresses:
            trade = {}
            trade['match_id'] = order_match['id']
            trade['source'] = order_match['tx1_address']
            trade['countersource'] = order_match['tx0_address']
            trade['block_index'] = order_match['block_index']
            trade['block_time'] = order_match['block_time']
            trade['status'] = order_match['status']
            if order_match['backward_asset'] == base_asset:
                trade['type'] = 'SELL'
                trade['price'] = calculate_price(order_match['backward_quantity'], order_match['forward_quantity'], supplies[order_match['backward_asset']][1], supplies[order_match['forward_asset']][1], 'SELL')
                trade['amount'] = order_match['backward_quantity']
                trade['total'] = order_match['forward_quantity']
            else:
                trade['type'] = 'BUY'
                trade['price'] = calculate_price(order_match['forward_quantity'], order_match['backward_quantity'], supplies[order_match['forward_asset']][1], supplies[order_match['backward_asset']][1], 'BUY')
                trade['amount'] = order_match['forward_quantity']
                trade['total'] = order_match['backward_quantity']
            market_trades.append(trade)
    return market_trades

def get_assets_supply(assets=[]):
    """Map each asset to a (supply, divisible) tuple.

    XCH supply comes from the API; VIA is hardcoded to (0, True); the rest
    come from valid issuances.  NOTE(review): `assets.remove(...)` mutates
    the caller's list in place -- callers must not reuse it afterwards.
    """
    supplies = {}
    if 'XCH' in assets:
        supplies['XCH'] = (util.call_jsonrpc_api('get_xcp_supply', [])['result'], True)
        assets.remove('XCH')
    if 'VIA' in assets:
        supplies['VIA'] = (0, True)
        assets.remove('VIA')
    if len(assets) > 0:
        sql = '''SELECT asset, SUM(quantity) AS supply, divisible FROM issuances WHERE asset IN ({}) AND status = ? GROUP BY asset ORDER BY asset'''.format(','.join(['?' for e in range(0,len(assets))]))
        bindings = assets + ['valid']
        issuances = util.call_jsonrpc_api('sql', {'query': sql, 'bindings': bindings})['result']
        for issuance in issuances:
            supplies[issuance['asset']] = (issuance['supply'], issuance['divisible'])
    return supplies

def get_pair_price(base_asset, quote_asset, max_block_time=None, supplies=None):
    """Return (last_price, trend) for a pair from its two most recent order
    matches; trend is -1/0/+1 versus the previous trade.

    NOTE(review): `before_last_price` is computed from order_matches[0]
    (the same, most recent match) rather than order_matches[1], so the
    comparison always yields trend == 0 -- looks like a bug; confirm.
    """
    if not supplies:
        supplies = get_assets_supply([base_asset, quote_asset])
    sql = '''SELECT *, MAX(tx0_index, tx1_index) AS tx_index, blocks.block_time FROM order_matches INNER JOIN blocks ON order_matches.block_index = blocks.block_index WHERE forward_asset IN (?, ?) AND backward_asset IN (?, ?) '''
    bindings = [base_asset, quote_asset, base_asset, quote_asset]
    if max_block_time:
        sql += '''AND block_time <= ? '''
        bindings += [max_block_time]
    sql += '''ORDER BY tx_index DESC LIMIT 2'''
    order_matches = util.call_jsonrpc_api('sql', {'query': sql, 'bindings': bindings})['result']
    if len(order_matches) == 0:
        last_price = D(0.0)
    elif order_matches[0]['forward_asset'] == base_asset:
        last_price = calculate_price(order_matches[0]['forward_quantity'], order_matches[0]['backward_quantity'], supplies[order_matches[0]['forward_asset']][1], supplies[order_matches[0]['backward_asset']][1])
    else:
        last_price = calculate_price(order_matches[0]['backward_quantity'], order_matches[0]['forward_quantity'], supplies[order_matches[0]['backward_asset']][1], supplies[order_matches[0]['forward_asset']][1])
    trend = 0
    if len(order_matches) == 2:
        if order_matches[1]['forward_asset'] == base_asset:
            before_last_price = calculate_price(order_matches[0]['forward_quantity'], order_matches[0]['backward_quantity'], supplies[order_matches[0]['forward_asset']][1], supplies[order_matches[0]['backward_asset']][1])
        else:
            before_last_price = calculate_price(order_matches[0]['backward_quantity'], order_matches[0]['forward_quantity'], supplies[order_matches[0]['backward_asset']][1], supplies[order_matches[0]['forward_asset']][1])
        if last_price < before_last_price:
            trend = -1
        elif last_price > before_last_price:
            trend = 1
    return D(last_price), trend

def get_price_movement(base_asset, quote_asset, supplies=None):
    """Return (price, trend, price_24h_ago, progression_percent) for a pair;
    progression falls back to 0 on division errors (e.g. no 24h price)."""
    yesterday = int(time.time() - (24*60*60))
    if not supplies:
        supplies = get_assets_supply([base_asset, quote_asset])
    price, trend = get_pair_price(base_asset, quote_asset, supplies=supplies)
    price24h, trend24h = get_pair_price(base_asset, quote_asset, max_block_time=yesterday, supplies=supplies)
    try:
        progression = (price - price24h) / (price24h / D(100))
    except:
        # price24h == 0 (no trade 24h ago) -> no measurable progression
        progression = D(0)
    return price, trend, price24h, progression

@util.block_cache
def get_markets_list(mongo_db=None, quote_asset=None, order_by=None):
    """Return the list of markets (with prices, supply, market cap and
    image flags), optionally limited to one quote asset and sorted."""
    yesterday = int(time.time() - (24*60*60))
    markets = []
    pairs = []
    currencies = ['XCP', 'XBTC'] if not quote_asset else [quote_asset]
    # pairs with volume last 24h
    pairs += get_quotation_pairs(exclude_pairs=[], max_pairs=500, from_time=yesterday, include_currencies=currencies)
    pair_with_volume = [p['pair'] for p in pairs]
    # pairs without volume last 24h
    pairs += get_quotation_pairs(exclude_pairs=pair_with_volume, max_pairs=500 - len(pair_with_volume), include_currencies=currencies)
    base_assets = [p['base_asset'] for p in pairs]
    quote_assets = [p['quote_asset'] for p in pairs]
    all_assets = list(set(base_assets + quote_assets))
    supplies = get_assets_supply(all_assets)
    asset_with_image = {}
    if mongo_db:
        # flag assets that have a validated image in extended info
        infos = mongo_db.asset_extended_info.find({'asset': {'$in': all_assets}}, {'_id': 0}) or False
        for info in infos:
            if 'info_data' in info and 'valid_image' in info['info_data'] and info['info_data']['valid_image']:
                asset_with_image[info['asset']] = True
    for pair in pairs:
        price, trend, price24h, progression = get_price_movement(pair['base_asset'], pair['quote_asset'], supplies=supplies)
        market = {}
        market['base_asset'] = pair['base_asset']
        market['quote_asset'] = pair['quote_asset']
        market['volume'] = pair['quote_quantity'] if pair['pair'] in pair_with_volume else 0
        market['price'] = format(price, ".8f")
        market['trend'] = trend
        market['progression'] = format(progression, ".2f")
        market['price_24h'] = format(price24h, ".8f")
        market['supply'] = supplies[pair['base_asset']][0]
        market['base_divisibility'] = supplies[pair['base_asset']][1]
        market['quote_divisibility'] = supplies[pair['quote_asset']][1]
        market['market_cap'] = format(D(market['supply']) * D(market['price']), ".4f")
        market['with_image'] = True if pair['base_asset'] in asset_with_image else False
        if market['base_asset'] == 'XCP' and market['quote_asset'] == 'BTC':
            markets.insert(0, market)
        else:
            markets.append(market)
    if order_by in ['price', 'progression', 'supply', 'market_cap']:
        markets = sorted(markets, key=lambda x: D(x[order_by]), reverse=True)
    elif order_by in ['base_asset', 'quote_asset']:
        # NOTE(review): `x['order_by']` looks up the literal key 'order_by'
        # (which no market dict has) instead of x[order_by] -- likely a bug.
        markets = sorted(markets, key=lambda x: x['order_by'])
    for m in range(len(markets)):
        markets[m]['pos'] = m + 1
    return markets

@util.block_cache
def get_market_details(asset1, asset2, min_fee_provided=0.95, max_fee_required=0.95, mongo_db=None):
    """Aggregate everything the market page needs for one pair: price
    movement, split buy/sell order books, recent trades and extended info.
    NOTE(review): `yesterday` is computed but never used here.
    """
    yesterday = int(time.time() - (24*60*60))
    base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
    supplies = get_assets_supply([base_asset, quote_asset])
    price, trend, price24h, progression = get_price_movement(base_asset, quote_asset, supplies=supplies)
    buy_orders = []
    sell_orders = []
    market_orders = get_market_orders(base_asset, quote_asset, supplies=supplies, min_fee_provided=min_fee_provided, max_fee_required=max_fee_required)
    for order in market_orders:
        if order['type'] == 'SELL':
            sell_orders.append(order)
        elif order['type'] == 'BUY':
            buy_orders.append(order)
    last_trades = get_market_trades(base_asset, quote_asset, supplies=supplies)
    ext_info = False
    if mongo_db:
        ext_info = mongo_db.asset_extended_info.find_one({'asset': base_asset}, {'_id': 0})
        if ext_info and 'info_data' in ext_info:
            ext_info = ext_info['info_data']
        else:
            ext_info = False
    return {
        'base_asset': base_asset,
        'quote_asset': quote_asset,
        'price': format(price, ".8f"),
        'trend': trend,
        'progression': format(progression, ".2f"),
        'price_24h': format(price24h, ".8f"),
        'supply': supplies[base_asset][0],
        'base_asset_divisible': supplies[base_asset][1],
        'quote_asset_divisible': supplies[quote_asset][1],
        'buy_orders': sorted(buy_orders, key=lambda x: D(x['price']), reverse=True),
        'sell_orders': sorted(sell_orders, key=lambda x: D(x['price'])),
        'last_trades': last_trades,
        'base_asset_infos': ext_info
    }
""" Abstraction for optimizers. It is sufficient that one re-implements the base estimator. """ import copy import inspect import numbers from collections import Iterable import numpy as np from ..callbacks import check_callback from ..callbacks import VerboseCallback from .optimizer import Optimizer from ..utils import eval_callbacks def base_minimize(func, dimensions, base_estimator, n_calls=100, n_random_starts=10, acq_func="EI", acq_optimizer="lbfgs", x0=None, y0=None, random_state=None, verbose=False, callback=None, n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96, n_jobs=1): """ Parameters ---------- * `func` [callable]: Function to minimize. Should take a single list of parameters and return the objective value. If you have a search-space where all dimensions have names, then you can use `skopt.utils.use_named_args` as a decorator on your objective function, in order to call it directly with the named arguments. See `use_named_args` for an example. * `dimensions` [list, shape=(n_dims,)]: List of search space dimensions. Each search dimension can be defined either as - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer` dimensions), - a `(lower_bound, upper_bound, "prior")` tuple (for `Real` dimensions), - as a list of categories (for `Categorical` dimensions), or - an instance of a `Dimension` object (`Real`, `Integer` or `Categorical`). NOTE: The upper and lower bounds are inclusive for `Integer` dimensions. * `base_estimator` [sklearn regressor]: Should inherit from `sklearn.base.RegressorMixin`. In addition, should have an optional `return_std` argument, which returns `std(Y | x)`` along with `E[Y | x]`. * `n_calls` [int, default=100]: Maximum number of calls to `func`. An objective fucntion will always be evaluated this number of times; Various options to supply initialization points do not affect this value. 
* `n_random_starts` [int, default=10]: Number of evaluations of `func` with random points before approximating it with `base_estimator`. * `acq_func` [string, default=`"EI"`]: Function to minimize over the posterior distribution. Can be either - `"LCB"` for lower confidence bound, - `"EI"` for negative expected improvement, - `"PI"` for negative probability of improvement. - `"EIps" for negated expected improvement per second to take into account the function compute time. Then, the objective function is assumed to return two values, the first being the objective value and the second being the time taken in seconds. - `"PIps"` for negated probability of improvement per second. The return type of the objective function is assumed to be similar to that of `"EIps * `acq_optimizer` [string, `"sampling"` or `"lbfgs"`, default=`"lbfgs"`]: Method to minimize the acquistion function. The fit model is updated with the optimal value obtained by optimizing `acq_func` with `acq_optimizer`. - If set to `"sampling"`, then `acq_func` is optimized by computing `acq_func` at `n_points` randomly sampled points and the smallest value found is used. - If set to `"lbfgs"`, then - The `n_restarts_optimizer` no. of points which the acquisition function is least are taken as start points. - `"lbfgs"` is run for 20 iterations with these points as initial points to find local minima. - The optimal of these local minima is used to update the prior. * `x0` [list, list of lists or `None`]: Initial input points. - If it is a list of lists, use it as a list of input points. If no corresponding outputs `y0` are supplied, then len(x0) of total calls to the objective function will be spent evaluating the points in `x0`. If the corresponding outputs are provided, then they will be used together with evaluated points during a run of the algorithm to construct a surrogate. - If it is a list, use it as a single initial input point. 
The algorithm will spend 1 call to evaluate the initial point, if the outputs are not provided. - If it is `None`, no initial input points are used. * `y0` [list, scalar or `None`] Objective values at initial input points. - If it is a list, then it corresponds to evaluations of the function at each element of `x0` : the i-th element of `y0` corresponds to the function evaluated at the i-th element of `x0`. - If it is a scalar, then it corresponds to the evaluation of the function at `x0`. - If it is None and `x0` is provided, then the function is evaluated at each element of `x0`. * `random_state` [int, RandomState instance, or None (default)]: Set random state to something other than None for reproducible results. * `verbose` [boolean, default=False]: Control the verbosity. It is advised to set the verbosity to True for long optimization runs. * `callback` [callable, list of callables, optional] If callable then `callback(res)` is called after each call to `func`. If list of callables, then each callable in the list is called. * `n_points` [int, default=10000]: If `acq_optimizer` is set to `"sampling"`, then `acq_func` is optimized by computing `acq_func` at `n_points` randomly sampled points. * `n_restarts_optimizer` [int, default=5]: The number of restarts of the optimizer when `acq_optimizer` is `"lbfgs"`. * `xi` [float, default=0.01]: Controls how much improvement one wants over the previous best values. Used when the acquisition is either `"EI"` or `"PI"`. * `kappa` [float, default=1.96]: Controls how much of the variance in the predicted values should be taken into account. If set to be very high, then we are favouring exploration over exploitation and vice versa. Used when the acquisition is `"LCB"`. * `n_jobs` [int, default=1]: Number of cores to run in parallel while running the lbfgs optimizations over the acquisition function. Valid only when `acq_optimizer` is set to "lbfgs." Defaults to 1 core. 
If `n_jobs=-1`, then number of jobs is set to number of cores. Returns ------- * `res` [`OptimizeResult`, scipy object]: The optimization result returned as a OptimizeResult object. Important attributes are: - `x` [list]: location of the minimum. - `fun` [float]: function value at the minimum. - `models`: surrogate models used for each iteration. - `x_iters` [list of lists]: location of function evaluation for each iteration. - `func_vals` [array]: function value for each iteration. - `space` [Space]: the optimization space. - `specs` [dict]`: the call specifications. - `rng` [RandomState instance]: State of the random state at the end of minimization. For more details related to the OptimizeResult object, refer http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html """ specs = {"args": copy.copy(inspect.currentframe().f_locals), "function": inspect.currentframe().f_code.co_name} acq_optimizer_kwargs = { "n_points": n_points, "n_restarts_optimizer": n_restarts_optimizer, "n_jobs": n_jobs} acq_func_kwargs = {"xi": xi, "kappa": kappa} # Initialize optimization # Suppose there are points provided (x0 and y0), record them # check x0: list-like, requirement of minimal points if x0 is None: x0 = [] elif not isinstance(x0[0], (list, tuple)): x0 = [x0] if not isinstance(x0, list): raise ValueError("`x0` should be a list, but got %s" % type(x0)) if n_random_starts <= 0 and not x0: raise ValueError("Either set `n_random_starts` > 0," " or provide `x0`") # check y0: list-like, requirement of maximal calls if isinstance(y0, Iterable): y0 = list(y0) elif isinstance(y0, numbers.Number): y0 = [y0] required_calls = n_random_starts + (len(x0) if not y0 else 0) if n_calls < required_calls: raise ValueError( "Expected `n_calls` >= %d, got %d" % (required_calls, n_calls)) # calculate the total number of initial points n_initial_points = n_random_starts + len(x0) # Build optimizer # create optimizer class optimizer = Optimizer(dimensions, base_estimator, 
n_initial_points=n_initial_points, acq_func=acq_func, acq_optimizer=acq_optimizer, random_state=random_state, acq_optimizer_kwargs=acq_optimizer_kwargs, acq_func_kwargs=acq_func_kwargs) # check x0: element-wise data type, dimensionality assert all(isinstance(p, Iterable) for p in x0) if not all(len(p) == optimizer.space.n_dims for p in x0): raise RuntimeError("Optimization space (%s) and initial points in x0 " "use inconsistent dimensions." % optimizer.space) # check callback callbacks = check_callback(callback) if verbose: callbacks.append(VerboseCallback( n_init=len(x0) if not y0 else 0, n_random=n_random_starts, n_total=n_calls)) # Record provided points # create return object result = None # evaluate y0 if only x0 is provided if x0 and y0 is None: y0 = list(map(func, x0)) n_calls -= len(y0) # record through tell function if x0: if not (isinstance(y0, Iterable) or isinstance(y0, numbers.Number)): raise ValueError( "`y0` should be an iterable or a scalar, got %s" % type(y0)) if len(x0) != len(y0): raise ValueError("`x0` and `y0` should have the same length") result = optimizer.tell(x0, y0) result.specs = specs if eval_callbacks(callbacks, result): return result # Optimize for n in range(n_calls): next_x = optimizer.ask() next_y = func(next_x) result = optimizer.tell(next_x, next_y) result.specs = specs if eval_callbacks(callbacks, result): break return result
"""Interactive command-line client for the (legacy) Dropbox Core API v1.

NOTE(review): this is Python 2 code (`print` statements, `except X, e`,
`raw_input`) against the deprecated v1 `dropbox` SDK; it will not run on
Python 3 or against the current Dropbox API without porting.
"""
import cmd
import locale
import os
import pprint
import shlex

from dropbox import client, rest, session

# XXX Fill in your consumer key and secret below
# You can find these at http://www.dropbox.com/developers/apps
APP_KEY = ''
APP_SECRET = ''
ACCESS_TYPE = 'app_folder'  # should be 'dropbox' or 'app_folder' as configured for your app


def command(login_required=True):
    """a decorator for handling authentication and exceptions"""
    def decorate(f):
        def wrapper(self, args):
            # Refuse to run commands that need credentials before 'login'.
            if login_required and not self.sess.is_linked():
                self.stdout.write("Please 'login' to execute this command\n")
                return

            try:
                # `args` is the list produced by parseline() below, so each
                # shell token becomes one positional argument of the command.
                return f(self, *args)
            except TypeError, e:
                # Wrong number of arguments for the command surfaces as a
                # TypeError from the call above; show it instead of crashing.
                self.stdout.write(str(e) + '\n')
            except rest.ErrorResponse, e:
                msg = e.user_error_msg or str(e)
                self.stdout.write('Error: %s\n' % msg)

        wrapper.__doc__ = f.__doc__
        return wrapper
    return decorate


class DropboxTerm(cmd.Cmd):
    """A cmd.Cmd shell whose do_* methods map to Dropbox operations."""

    def __init__(self, app_key, app_secret):
        cmd.Cmd.__init__(self)
        self.sess = StoredSession(app_key, app_secret, access_type=ACCESS_TYPE)
        self.api_client = client.DropboxClient(self.sess)
        self.current_path = ''  # remote cwd, always joined with "/" below
        self.prompt = "Dropbox> "
        self.sess.load_creds()

    @command()
    def do_ls(self):
        """list files in current remote directory"""
        resp = self.api_client.metadata(self.current_path)

        if 'contents' in resp:
            for f in resp['contents']:
                name = os.path.basename(f['path'])
                # Encode for the local terminal; names are unicode objects.
                encoding = locale.getdefaultlocale()[1]
                self.stdout.write(('%s\n' % name).encode(encoding))

    @command()
    def do_cd(self, path):
        """change current working directory"""
        if path == "..":
            # Drop the last path component.
            self.current_path = "/".join(self.current_path.split("/")[0:-1])
        else:
            self.current_path += "/" + path

    @command(login_required=False)
    def do_login(self):
        """log in to a Dropbox account"""
        try:
            self.sess.link()
        except rest.ErrorResponse, e:
            self.stdout.write('Error: %s\n' % str(e))

    @command()
    def do_logout(self):
        """log out of the current Dropbox account"""
        self.sess.unlink()
        self.current_path = ''

    @command()
    def do_cat(self, path):
        """display the contents of a file"""
        f, metadata = self.api_client.get_file_and_metadata(self.current_path + "/" + path)
        self.stdout.write(f.read())
        self.stdout.write("\n")

    @command()
    def do_mkdir(self, path):
        """create a new directory"""
        self.api_client.file_create_folder(self.current_path + "/" + path)

    @command()
    def do_rm(self, path):
        """delete a file or directory"""
        self.api_client.file_delete(self.current_path + "/" + path)

    @command()
    def do_mv(self, from_path, to_path):
        """move/rename a file or directory"""
        self.api_client.file_move(self.current_path + "/" + from_path,
                                  self.current_path + "/" + to_path)

    @command()
    def do_account_info(self):
        """display account information"""
        f = self.api_client.account_info()
        pprint.PrettyPrinter(indent=2).pprint(f)

    @command(login_required=False)
    def do_exit(self):
        """exit"""
        return True

    @command()
    def do_get(self, from_path, to_path):
        """
        Copy file from Dropbox to local file and print out out the metadata.

        Examples:
        Dropbox> get file.txt ~/dropbox-file.txt
        """
        # NOTE(review): to_file is never explicitly closed/flushed here; relies
        # on CPython refcounting. Consider a `with` block when porting.
        to_file = open(os.path.expanduser(to_path), "wb")

        f, metadata = self.api_client.get_file_and_metadata(self.current_path + "/" + from_path)
        print 'Metadata:', metadata
        to_file.write(f.read())

    @command()
    def do_thumbnail(self, from_path, to_path, size='large', format='JPEG'):
        """
        Copy an image file's thumbnail to a local file and print out the
        file's metadata.

        Examples:
        Dropbox> thumbnail file.txt ~/dropbox-file.txt medium PNG
        """
        # NOTE(review): same unclosed-file pattern as do_get.
        to_file = open(os.path.expanduser(to_path), "wb")

        f, metadata = self.api_client.thumbnail_and_metadata(
                self.current_path + "/" + from_path, size, format)
        print 'Metadata:', metadata
        to_file.write(f.read())

    @command()
    def do_put(self, from_path, to_path):
        """
        Copy local file to Dropbox

        Examples:
        Dropbox> put ~/test.txt dropbox-copy-test.txt
        """
        from_file = open(os.path.expanduser(from_path), "rb")

        self.api_client.put_file(self.current_path + "/" + to_path, from_file)

    @command()
    def do_search(self, string):
        """Search Dropbox for filenames containing the given string."""
        results = self.api_client.search(self.current_path, string)
        for r in results:
            self.stdout.write("%s\n" % r['path'])

    @command(login_required=False)
    def do_help(self):
        # Find every "do_" attribute with a non-empty docstring and print
        # out the docstring.
        all_names = dir(self)
        cmd_names = []
        for name in all_names:
            if name[:3] == 'do_':
                cmd_names.append(name[3:])
        cmd_names.sort()
        for cmd_name in cmd_names:
            f = getattr(self, 'do_' + cmd_name)
            if f.__doc__:
                self.stdout.write('%s: %s\n' % (cmd_name, f.__doc__))

    # the following are for command line magic and aren't Dropbox-related
    def emptyline(self):
        # Default cmd.Cmd behavior repeats the last command; do nothing instead.
        pass

    def do_EOF(self, line):
        self.stdout.write('\n')
        return True

    def parseline(self, line):
        # Override cmd.Cmd's parsing: shell-style tokenization so the
        # @command wrapper receives a list of arguments, not a raw string.
        parts = shlex.split(line)
        if len(parts) == 0:
            return None, None, line
        else:
            return parts[0], parts[1:], line


class StoredSession(session.DropboxSession):
    """a wrapper around DropboxSession that stores a token to a file on disk"""
    TOKEN_FILE = "token_store.txt"

    def load_creds(self):
        try:
            # Token file format: "<key>|<secret>" (written by write_creds).
            stored_creds = open(self.TOKEN_FILE).read()
            self.set_token(*stored_creds.split('|'))
            print "[loaded access token]"
        except IOError:
            pass  # don't worry if it's not there

    def write_creds(self, token):
        f = open(self.TOKEN_FILE, 'w')
        f.write("|".join([token.key, token.secret]))
        f.close()

    def delete_creds(self):
        os.unlink(self.TOKEN_FILE)

    def link(self):
        # OAuth 1.0 dance: request token -> user authorizes in browser ->
        # exchange for an access token, which we persist for next launch.
        request_token = self.obtain_request_token()
        url = self.build_authorize_url(request_token)
        print "url:", url
        print "Please authorize in the browser. After you're done, press enter."
        raw_input()

        self.obtain_access_token(request_token)
        self.write_creds(self.token)

    def unlink(self):
        self.delete_creds()
        session.DropboxSession.unlink(self)


def main():
    if APP_KEY == '' or APP_SECRET == '':
        exit("You need to set your APP_KEY and APP_SECRET!")
    term = DropboxTerm(APP_KEY, APP_SECRET)
    term.cmdloop()

if __name__ == '__main__':
    main()
# coding=utf-8
r"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
      /       /
"""
# NOTE: auto-generated Twilio helper-library module (preview TrustedComms
# "CurrentCall" resource). Do not hand-edit logic; regenerate instead.

from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page


class CurrentCallList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version):
        """
        Initialize the CurrentCallList

        :param Version version: Version that contains the resource

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
        """
        super(CurrentCallList, self).__init__(version)

        # Path Solution -- CurrentCall is a singleton resource, so there are
        # no path parameters to record.
        self._solution = {}

    def get(self):
        """
        Constructs a CurrentCallContext

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        """
        return CurrentCallContext(self._version, )

    def __call__(self):
        """
        Constructs a CurrentCallContext

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        """
        return CurrentCallContext(self._version, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.TrustedComms.CurrentCallList>'


class CurrentCallPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, response, solution):
        """
        Initialize the CurrentCallPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallPage
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallPage
        """
        super(CurrentCallPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of CurrentCallInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
        """
        return CurrentCallInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.TrustedComms.CurrentCallPage>'


class CurrentCallContext(InstanceContext):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version):
        """
        Initialize the CurrentCallContext

        :param Version version: Version that contains the resource

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        """
        super(CurrentCallContext, self).__init__(version)

        # Path Solution (empty: the URI has no placeholders, .format is a no-op)
        self._solution = {}
        self._uri = '/CurrentCall'.format(**self._solution)

    def fetch(self):
        """
        Fetch a CurrentCallInstance

        :returns: Fetched CurrentCallInstance
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
        """
        params = values.of({})

        payload = self._version.fetch(
            'GET',
            self._uri,
            params=params,
        )

        return CurrentCallInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.TrustedComms.CurrentCallContext {}>'.format(context)


class CurrentCallInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, payload):
        """
        Initialize the CurrentCallInstance

        :returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
        """
        super(CurrentCallInstance, self).__init__(version)

        # Marshaled Properties
        # ('from' is a Python keyword, so the API's 'from' key is exposed as
        # the 'from_' property; 'created_at' is parsed to a datetime.)
        self._properties = {
            'sid': payload.get('sid'),
            'from_': payload.get('from'),
            'to': payload.get('to'),
            'status': payload.get('status'),
            'reason': payload.get('reason'),
            'created_at': deserialize.iso8601_datetime(payload.get('created_at')),
            'caller': payload.get('caller'),
            'logo': payload.get('logo'),
            'bg_color': payload.get('bg_color'),
            'font_color': payload.get('font_color'),
            'use_case': payload.get('use_case'),
            'manager': payload.get('manager'),
            'shield_img': payload.get('shield_img'),
            'url': payload.get('url'),
        }

        # Context
        self._context = None
        self._solution = {}

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions.  All instance actions are proxied to the context

        :returns: CurrentCallContext for this CurrentCallInstance
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
        """
        # Lazily construct (and cache) the context on first use.
        if self._context is None:
            self._context = CurrentCallContext(self._version, )
        return self._context

    @property
    def sid(self):
        """
        :returns: A string that uniquely identifies this current phone call.
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def from_(self):
        """
        :returns: The originating phone number
        :rtype: unicode
        """
        return self._properties['from_']

    @property
    def to(self):
        """
        :returns: The terminating phone number
        :rtype: unicode
        """
        return self._properties['to']

    @property
    def status(self):
        """
        :returns: The status of the current phone call
        :rtype: unicode
        """
        return self._properties['status']

    @property
    def reason(self):
        """
        :returns: The business reason for this current phone call
        :rtype: unicode
        """
        return self._properties['reason']

    @property
    def created_at(self):
        """
        :returns: The date this current phone call was created
        :rtype: datetime
        """
        return self._properties['created_at']

    @property
    def caller(self):
        """
        :returns: Caller name of the current phone call
        :rtype: unicode
        """
        return self._properties['caller']

    @property
    def logo(self):
        """
        :returns: Logo URL of the caller
        :rtype: unicode
        """
        return self._properties['logo']

    @property
    def bg_color(self):
        """
        :returns: Background color of the current phone call
        :rtype: unicode
        """
        return self._properties['bg_color']

    @property
    def font_color(self):
        """
        :returns: Font color of the current phone call
        :rtype: unicode
        """
        return self._properties['font_color']

    @property
    def use_case(self):
        """
        :returns: The use case for the current phone call
        :rtype: unicode
        """
        return self._properties['use_case']

    @property
    def manager(self):
        """
        :returns: The name of the CPS organization
        :rtype: unicode
        """
        return self._properties['manager']

    @property
    def shield_img(self):
        """
        :returns: Shield image URL that serves as authenticity proof of the current phone call
        :rtype: unicode
        """
        return self._properties['shield_img']

    @property
    def url(self):
        """
        :returns: The URL of this resource.
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch a CurrentCallInstance

        :returns: Fetched CurrentCallInstance
        :rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.TrustedComms.CurrentCallInstance {}>'.format(context)
import six
from sqlalchemy import types

from ..exceptions import ImproperlyConfigured
from .scalar_coercible import ScalarCoercible

try:
    from enum import Enum
except ImportError:
    # Python < 3.4 without the enum34 backport: Enum integration disabled.
    Enum = None


class Choice(object):
    """A selected choice: pairs the stored ``code`` with its display ``value``.

    Compares equal both to another :class:`Choice` with the same code and to
    the raw code itself.
    """

    def __init__(self, code, value):
        self.code = code
        self.value = value

    def __eq__(self, other):
        if isinstance(other, Choice):
            return self.code == other.code
        return other == self.code

    def __ne__(self, other):
        return not (self == other)

    def __unicode__(self):
        return six.text_type(self.value)

    def __repr__(self):
        return 'Choice(code={code}, value={value})'.format(
            code=self.code,
            value=self.value
        )


class ChoiceType(types.TypeDecorator, ScalarCoercible):
    """
    ChoiceType offers way of having fixed set of choices for given column. It
    could work with a list of tuple (a collection of key-value pairs), or
    integrate with :mod:`enum` in the standard library of Python 3.4+ (the
    enum34_ backported package on PyPI is compatible too for ``< 3.4``).

    .. _enum34: https://pypi.python.org/pypi/enum34

    Columns with ChoiceTypes are automatically coerced to Choice objects while
    a list of tuple been passed to the constructor. If a subclass of
    :class:`enum.Enum` is passed, columns will be coerced to
    :class:`enum.Enum` objects instead.

    ::

        class User(Base):
            TYPES = [
                (u'admin', u'Admin'),
                (u'regular-user', u'Regular user')
            ]

            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            type = sa.Column(ChoiceType(TYPES))


        user = User(type=u'admin')
        user.type  # Choice(type='admin', value=u'Admin')

    Or::

        import enum


        class UserType(enum.Enum):
            admin = 1
            regular = 2


        class User(Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            type = sa.Column(ChoiceType(UserType, impl=sa.Integer()))


        user = User(type=1)
        user.type  # <UserType.admin: 1>

    ChoiceType is very useful when the rendered values change based on user's
    locale, e.g. with Babel's ``lazy_gettext`` used as the display values.
    """

    # Default storage type; overridden per-column via the ``impl`` argument.
    impl = types.Unicode(255)

    def __init__(self, choices, impl=None):
        """
        :param choices: list of (code, value) pairs, or an enum.Enum subclass
        :param impl: optional SQLAlchemy type to store codes as
            (e.g. ``sa.Integer()`` for integer-valued enums)
        :raises ImproperlyConfigured: if choices is empty (list form only)
        """
        self.choices = choices

        # Dispatch to the Enum-backed or pair-list-backed implementation.
        if (
            Enum is not None and
            isinstance(choices, type) and
            issubclass(choices, Enum)
        ):
            self.type_impl = EnumTypeImpl(enum_class=choices)
        else:
            self.type_impl = ChoiceTypeImpl(choices=choices)

        if impl:
            self.impl = impl

    @property
    def python_type(self):
        return self.impl.python_type

    def _coerce(self, value):
        return self.type_impl._coerce(value)

    def process_bind_param(self, value, dialect):
        return self.type_impl.process_bind_param(value, dialect)

    def process_result_value(self, value, dialect):
        return self.type_impl.process_result_value(value, dialect)


class ChoiceTypeImpl(object):
    """The implementation for the ``Choice`` usage."""

    def __init__(self, choices):
        if not choices:
            raise ImproperlyConfigured(
                'ChoiceType needs list of choices defined.'
            )
        # code -> display value lookup table.
        self.choices_dict = dict(choices)

    def _coerce(self, value):
        """Coerce a raw code (or pass through a Choice/None) to a Choice."""
        if value is None:
            return value
        if isinstance(value, Choice):
            return value
        return Choice(value, self.choices_dict[value])

    def process_bind_param(self, value, dialect):
        """Unwrap a Choice to its code for storage.

        BUGFIX: the previous ``if value and isinstance(value, Choice)`` test
        was redundant (Choice instances are always truthy); the isinstance
        check alone is sufficient and makes the intent explicit.
        """
        if isinstance(value, Choice):
            return value.code
        return value

    def process_result_value(self, value, dialect):
        """Wrap a stored code back into a Choice.

        BUGFIX: the previous ``if value:`` test skipped coercion for falsy
        codes (integer 0, empty string, False), returning the raw code
        instead of a Choice. Test against None explicitly so every non-NULL
        code round-trips to a Choice.
        """
        if value is not None:
            return Choice(value, self.choices_dict[value])
        return value


class EnumTypeImpl(object):
    """The implementation for the ``Enum`` usage."""

    def __init__(self, enum_class):
        if Enum is None:
            raise ImproperlyConfigured(
                "'enum34' package is required to use 'EnumType' in Python "
                "< 3.4"
            )
        if not issubclass(enum_class, Enum):
            raise ImproperlyConfigured(
                "EnumType needs a class of enum defined."
            )

        self.enum_class = enum_class

    def _coerce(self, value):
        """Coerce a raw value to an enum member (None passes through)."""
        if value is None:
            return None
        return self.enum_class(value)

    def process_bind_param(self, value, dialect):
        # Accepts either an enum member or its underlying value; both are
        # normalized through the enum constructor before taking .value.
        if value is None:
            return None
        return self.enum_class(value).value

    def process_result_value(self, value, dialect):
        return self._coerce(value)
import Tkinter as tk import numpy as np import sys import os.path import xml.etree.ElementTree as ET import fnmatch import cv2 import numpy as np import face import shutil import time from picamera import PiCamera from picamera.array import PiRGBArray import speaker #import FinalFaceRecog as fr TRAINING_FILE='train_LBPH.xml' BASE_PATH="training/negative" cascadePath = "haarcascade_frontalface_alt.xml" LOOKUP_FILE='lookup_table.txt' ENROLLMENT_FILE='enrollment.txt' CSV_FILE='CSV.txt' class BioLock(tk.Tk): def __init__(self, *args, **kwargs): tk.Tk.__init__(self, *args, **kwargs) # self.attributes('-fullscreen',True) container = tk.Frame(self) container.pack(side="top", fill="both", expand=True) container.grid_rowconfigure(0, weight=3) container.grid_columnconfigure(0, weight=3) newcontainer=tk.Frame(self) newcontainer.pack(side="bottom",fill="both",expand=True) container.grid_rowconfigure(1, weight=7) container.grid_columnconfigure(0, weight=7) self.text = tk.Text(newcontainer, wrap="word") self.text.pack(side="top", fill="both", expand=True) self.text.tag_configure("stdout", foreground="#b22222") sys.stdout = TextRedirector(self.text, "stdout") sys.stderr = TextRedirector(self.text, "stderr") ############# Here self.adminDict = {} self.accessDict = {} self.codeDict = {} tree = ET.parse('voicedata.xml') root = tree.getroot() for child in root: idString = child.tag ID = int(idString[1:len(idString)]) print ID self.adminDict[ID] = child.get("admin") self.accessDict[ID] = child.get("access") self.codeDict[ID] = np.load('Arrays/{}.npy'.format(ID)) ############## To here # Initalize new user variables self.newAccess = False self.newAccessString = None self.newAdmin = False self.newAdminString = None self.newID = -1 self.newCode = None # Initalize test variables self.access = False self.admin = False self.id = -1 self.code = None # Initialize Frames self.frames = {} #######Copy this for f in [faceCapture, voiceCapture, enroll, enrollFace, enrollVoice, success, 
failure,enrollDisclaim,authDisclaim,success_notAdmin,captureImage,voiceCapture_again]: frame = f(container, self) self.frames[f] = frame frame.grid(row=0, column=0, sticky="nsew") if len(self.codeDict) == 0: self.show_frame(enrollDisclaim) else: self.show_frame(authDisclaim) ####### To here # Brings specified frame to front of app def show_frame(self, cont): frame = self.frames[cont] frame.tkraise() def enrollPriv(self, access, admin): if access.get() == "Yes": self.newAccess = True if admin.get() == "Yes": self.newAdmin = True self.show_frame(captureImage) def facecap(self): name="Test" FaceRecognizer.ImageCapture(name) self.show_frame(enrollFace) def enroll(self): name = "Test" self.newID = FaceRecognizer.LBPHupdate(name) print self.newID self.show_frame(enrollVoice) def enrollVoice(self): # Voice enrollment code goes here self.newCode = speaker.train() # Save data to dictionary and storage self.adminDict[self.newID] = self.newAdmin self.accessDict[self.newID] = self.newAccess self.codeDict[self.newID] = self.newCode self.show_frame(authDisclaim) np.save('Arrays/{}'.format(self.newID),self.newCode) tree = ET.parse('voicedata.xml') root = tree.getroot() element = ET.SubElement(root,'s'+str(self.newID),admin=str(self.newAdmin),access=str(self.newAccess)) tree.write('voicedata.xml') def faceAuth(self): # Facial Recognition # successful=fr.Authenticate() #Add id successful,label = FaceRecognizer.Authenticate() self.id=label print successful print label if successful == 1 and self.id in self.codeDict: self.code = self.codeDict[self.id] self.show_frame(voiceCapture) else: self.id = -1 self.show_frame(failure) def voiceAuth(self): # Run speaker Recognition (including voice capture) successful = speaker.test(self.code) if successful is True: self.admin = self.adminDict[self.id] self.access = self.accessDict[self.id] print self.access+","+self.admin #print str(bool(self.access))+","+str(bool(self.admin)) if self.access=="True": if self.admin=="True": #print 
"!!"+self.admin self.show_frame(success) else: #print "!?"+self.admin self.show_frame(success_notAdmin) else: self.admin = False self.id = -1 self.code = None self.show_frame(failure) else: self.admin = False self.id = -1 self.code = None self.show_frame(voiceCapture_again) def unlock(self): print "Send Unlock Signal" self.access = False self.admin = False self.id = -1 self.code = None self.show_frame(faceCapture) def cancel(self): self.newAccess = False self.newAccessString = None self.newAdmin = False self.newAdminString = None self.newID = 0 self.newCode = None self.access = False self.admin = False self.id = -1 self.code = None self.show_frame(authDisclaim) def newUser(self): if self.admin=="True": self.show_frame(enrollDisclaim) def startEnroll(self): self.show_frame(enroll) def startAuth(self): self.show_frame(faceCapture) # # Frames # class enrollDisclaim(tk.Frame): def __init__(self,parent,controller): tk.Frame.__init__(self,parent) label = tk.Label(self, text="Enrollment Disclaimer") label.pack() disclaimer = tk.Label(self, text="This program will collect your biometric information, including a photo of your face as well as your voice. Using this device conveys your acceptance of these terms. Tap OK to continue.") disclaimer.pack() button = tk.Button(self, text='OK', command=controller.startEnroll) button.pack() class authDisclaim(tk.Frame): def __init__(self,parent,controller): tk.Frame.__init__(self,parent) label = tk.Label(self, text="Disclaimer") label.pack() disclaimer = tk.Label(self, text="This program will collect your biometric information, including a photo of your face as well as your voice. Using this device conveys your acceptance of these terms. 
Tap OK to continue.") disclaimer.pack() button = tk.Button(self, text='OK', command=controller.startAuth) button.pack() class enroll(tk.Frame): def __init__(self,parent,controller): tk.Frame.__init__(self,parent) #controller.newAccessString = tk.StringVar(controller) #controller.newAccessString.set("Yes") #controller.newAdminString = tk.StringVar(controller) #controller.newAdminString.set("No") access = tk.StringVar(controller) access.set("Yes") admin = tk.StringVar(controller) admin.set("No") label = tk.Label(self, text= "Enroll new user") label.pack() accessLabel = tk.Label(self, text="Give new user access to system?") accessLabel.pack() #accessMenu = tk.OptionMenu(self,controller.newAccessString,"Yes","No") accessMenu = tk.OptionMenu(self,access,"Yes","No") accessMenu.pack() adminLabel = tk.Label(self, text="Give new user admin rights") adminLabel.pack() #adminMenu = tk.OptionMenu(self, controller.newAdminString,"Yes","No") adminMenu = tk.OptionMenu(self, admin,"Yes","No") adminMenu.pack() #button = tk.Button(self,text = "Next", command=controller.enrollPriv) button = tk.Button(self,text = "Next", command=lambda: controller.enrollPriv(access,admin)) button.pack() cancel = tk.Button(self,text= "Cancel", command=controller.cancel) cancel.pack() create = tk.Button(self,text="Create CSV",command=FaceRecognizer.create_csv) create.pack(side="left") train = tk.Button(self,text="Load in Database",command=FaceRecognizer.trainLBPH) train.pack(side="left") class captureImage(tk.Frame): def __init__(self,parent,controller): self.controller=controller tk.Frame.__init__(self,parent) label = tk.Label(self, text="Enrollment Face Capture") label.pack() toolbar=tk.Frame(self) toolbar.pack(side="top",fill="x") button = tk.Button(self, text="Capture Facial Data", command=controller.facecap) button.pack(in_=toolbar,side="left") class enrollFace(tk.Frame): def __init__(self, parent, controller): self.controller = controller tk.Frame.__init__(self, parent) label = tk.Label(self, 
text="Enrollment Face Capture") label.pack() label_question=tk.Label(self,text="Do you want to re-capture your images?") label_question.pack(side="left") button_y = tk.Button(self, text="Yes", command=controller.facecap) button_y.pack(side="left") button_n = tk.Button(self, text="No", command=controller.enroll) button_n.pack(side="left") class enrollVoice(tk.Frame): def __init__(self,parent,controller): self.controller = controller tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Enrollment Voice Capture") label.pack() instr = tk.Label(self, text="After pushing the button please say your passphrase") instr.pack() button = tk.Button(self, text = "Record Voice Sample", command=controller.enrollVoice) button.pack() cancel = tk.Button(self,text= "Cancel", command=controller.cancel) cancel.pack() class faceCapture(tk.Frame): def __init__(self,parent,controller): self.controller = controller tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Face Image Capture") label.pack() #added new layout toolbar = tk.Frame(self) toolbar.pack(side="top", fill="x") b2 = tk.Button(self, text="authenticate", command=controller.faceAuth) b2.pack(in_=toolbar, side="left") class voiceCapture(tk.Frame): def __init__(self,parent,controller): self.controller = controller tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Voice Capture") label.pack() instr = tk.Label(self, text="After pushing the button please say your passphrase") button = tk.Button(self, text = "Ready to Record", command=controller.voiceAuth) button.pack() cancel = tk.Button(self,text= "Cancel", command=controller.cancel) cancel.pack() class voiceCapture_again(tk.Frame): def __init__(self,parent,controller): self.controller = controller tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Voice Capture") label.pack() instr = tk.Label(self, text="Do you want to try again?") button_y = tk.Button(self, text = "Yes", command=controller.voiceAuth) button_y.pack() cancel = 
tk.Button(self,text= "No", command=controller.cancel) cancel.pack() class success(tk.Frame): def __init__(self,parent,controller): tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Authentication Successful") label.pack() unlock = tk.Button(self, text= "Unlock System", command= controller.unlock) unlock.pack() enroll = tk.Button(self, text="Enroll new User", command=controller.newUser) enroll.pack() class success_notAdmin(tk.Frame): def __init__(self,parent,controller): tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Authentication Successful") label.pack() unlock = tk.Button(self, text= "Unlock System", command= controller.unlock) unlock.pack() class failure(tk.Frame): def __init__(self,parent,controller): tk.Frame.__init__(self,parent) label = tk.Label(self, text= "Authentication Failed") label.pack() button = tk.Button(self, text = "OK", command=lambda: controller.show_frame(faceCapture)) button.pack() class FaceRecognizer(object): @classmethod def walk_files(cls,directory, match='*'): """Generator function to iterate through all files in a directory recursively which match the given filename match parameter. """ for root, dirs, files in os.walk(directory): for filename in fnmatch.filter(files, match): yield os.path.join(root, filename) @classmethod def prepare_image(cls,filename): """Read an image as grayscale and resize it to the appropriate size for training the face recognition model. """ return face.resize(cv2.imread(filename, cv2.IMREAD_GRAYSCALE)) @classmethod def normalize(cls,X, low, high, dtype=None): """Normalizes a given array in X to a value between low and high.""" X = np.asarray(X) minX, maxX = np.min(X), np.max(X) # normalize to [0...1]. X = X - float(minX) X = X / float((maxX - minX)) # scale to [low...high]. 
        # NOTE(review): tail of a method whose `def` line is above this view.
        # It appears to rescale X into the [low, high] range and coerce the
        # result to a numpy array — confirm against the full source.
        X = X * (high-low)
        X = X + low
        if dtype is None:
            return np.asarray(X)
        return np.asarray(X, dtype=dtype)

    #----------------------------------------------------------------------------------------------Load LOOKUP TABLE
    @classmethod
    def load_table(cls, filename, lookup_table, sample_images):
        # Populate `lookup_table` (folder names) and `sample_images` (image
        # file names) in place from a ';'-separated lookup file.
        # NOTE(review): the file handle is never closed — leak on every call.
        t = open(filename, 'r+')
        line = t.readline()
        #lookup_table=[]
        #sample_images=[]
        while line != "":
            two = line.split(";")
            folder_name = two[0]
            imageName = two[1]
            lookup_table.append(folder_name)
            sample_images.append(imageName)
            #print "folder: "+folder_name+ " !!" +imageName
            line = t.readline()

    #----------------------------------------------------------------------------------------------Create CSV and LOOKUP table
    @classmethod
    def create_csv(cls):
        # Walk BASE_PATH and write one "<path>;<label>" line per image to
        # CSV_FILE, plus one "<folder>;<sample image>;" line per subject
        # folder to LOOKUP_FILE.  One integer label per subject folder.
        #if len(sys.argv) != 2:
        #print "usage: create_csv <base_path>"
        #sys.exit(1)
        SEPARATOR = ";"
        lookup_table = []
        f = open(CSV_FILE, 'w')
        t = open(LOOKUP_FILE, 'w')
        label = 0
        for dirname, dirnames, filenames in os.walk(BASE_PATH):
            for subdirname in dirnames:
                print "!! " + subdirname
                #subject_path = os.path.join(dirname, subdirname)
                subject_path = "%s/%s" % (dirname, subdirname)
                for filename in os.listdir(subject_path):
                    # backslash-joined path goes in the CSV...
                    abs_path = "%s\%s" % (subject_path, filename)
                    # ...forward-slash variant added to create right directorys in linux
                    abs_path2 = "%s/%s" % (subject_path, filename)
                    seq = ''.join([str(abs_path), str(SEPARATOR), str(label), '\n'])
                    #Sprint seq
                    f.write(seq)
                    #print "%s%s%d" % (abs_path, SEPARATOR, label)
                # NOTE(review): indentation reconstructed — label increments and
                # the lookup entry (using the LAST file's path) are per folder.
                label = label + 1
                lookup_table.append(subdirname)
                t.write(''.join([str(subdirname), ';', abs_path2, ';\n']));
        print lookup_table
        # use lookup_table[label] to look up the specific folder of that label
        f.close()
        t.close()

    #--------------------------------------------------------------------------------------------TRAIN THE SYSTEM (RUN ONLY ONCE)
    @classmethod
    def trainLBPH(cls):
        # Read CSV_FILE, load every *.pgm under each labelled folder, and
        # train a fresh LBPH recognizer saved to TRAINING_FILE.
        faces = []
        labels = []
        labelnum = []
        temp = 10000          # sentinel: "no label seen yet"
        totalcount = 0
        f = open(CSV_FILE, 'r+')
        s = f.readline()
        while s != "":
            #print s
            # NOTE(review): `list` shadows the builtin; kept as-is here.
            list = s.split(';')
            #print list
            path = str(list[0]).split('\\')
            #print path[0]
            num = str(list[1]).split('\n')
            if temp != int(num[0]):
                # First line of a new label: load the whole folder at once.
                temp = int(num[0])
                print num[0]
                tempcount = 0
                labelnum.append(int(num[0]))
                for filename in cls.walk_files(path[0], '*.pgm'):
                    #print filename
                    faces.append(cls.prepare_image(filename))
                    labels.append(labelnum[int(num[0])])
                    tempcount += 1
                    totalcount += 1
            else:
                # Same label again: skip the remaining CSV lines of the group
                # (they were already consumed by the folder walk above).
                while tempcount > 0:
                    s = f.readline()
                    tempcount -= 1
                    print num[0] + ":" + s + "!!" + str(tempcount) + "\n"
                continue
        print 'Read', totalcount, 'images'
        #print np.asarray(labels).shape
        #print np.asarray(faces).shape
        #Train model
        print 'Training model...'
        model = cv2.createLBPHFaceRecognizer()
        model.train(np.asarray(faces), np.asarray(labels))
        #Save model results
        model.save(TRAINING_FILE)
        print 'Training data saved to', TRAINING_FILE

    #-------------------------------------------------------------------------------------Enroll and update
    @classmethod
    def ImageCapture(cls, ID):
        # Capture 30 face crops from the Pi camera into folder `ID`
        # (files "<ID>/Images<n>.pgm") for later enrollment.
        labels = []
        images = []
        # make sure this is the right file name
        faceCascade = cv2.CascadeClassifier(cascadePath)
        counter = 0
        #counter2=0
        foldername = ID;
        if not os.path.exists(foldername):
            os.makedirs(foldername)
        name = foldername + "/Images"
        camera = PiCamera()
        camera.resolution = (320, 240)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(320, 240))
        time.sleep(3)  # let the camera warm up
        cv2.namedWindow("Preview")
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        while rawCapture is not None and counter < 30:
            image = rawCapture.array
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            result = face.detect_single(gray)
            cv2.imshow("Preview", image)
            if result is None:
                flag = 0
                print "could not detect single face. Please retry."
            else:
                x, y, w, h = result
                flag = 1
                cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
                scaled_byRatio = face.crop(gray, x, y, w, h)
                resized = face.resize(scaled_byRatio)
                print "Saved captured image No." + str(counter)
                counter = counter + 1
                filename = name + str(counter) + ".pgm"
                cv2.imwrite(filename, resized)
            # Reuse the capture buffer for the next frame.
            rawCapture.truncate(0)
            camera.capture(rawCapture, format="bgr", use_video_port=True)
            key = cv2.waitKey(1)
        camera.close()
        cv2.destroyWindow("Preview")

    @classmethod
    def LBPHupdate(cls, ID):
        # Enroll the images captured under folder `ID`: append them to the
        # CSV/lookup/enrollment files under a fresh label, incrementally
        # update the saved LBPH model, then delete the capture folder.
        foldername = ID
        #update database
        print 'Loading training data...'
        model = cv2.createLBPHFaceRecognizer()
        model.load(TRAINING_FILE)
        print 'Training data loaded!'
        f = open(CSV_FILE, 'r+')
        t = open(LOOKUP_FILE, 'r+')
        en = open(ENROLLMENT_FILE, 'r+')
        #Get label: read the last CSV line (seek 10 bytes from EOF) and add 1.
        # NOTE(review): assumes the final record fits in the last 10 bytes.
        f.seek(-10, 2)
        s = f.readline()
        #print s
        list = s.split(';')
        num = str(list[1]).split('\n')
        #new label no.
        label = int(num[0]) + 1
        #print label
        f.seek(0, 2)
        t.seek(0, 2)
        en.seek(0, 2)
        faces = []
        labels = []
        DIRECTORY = foldername
        #print DIRECTORY
        SEPARATOR = ";"
        # NOTE(review): reconstructed indentation — all three writes appear
        # to happen once per captured file.
        for files in os.listdir(DIRECTORY):
            abs_path = "%s\%s" % (DIRECTORY, files)
            seq = ''.join([str(abs_path), str(SEPARATOR), str(label), '\n'])
            f.write(seq)
            t.write(''.join([str(DIRECTORY), ';', abs_path, ';\n']));
            en.write(''.join([str(label), '\n']))
        f.close()
        t.close()
        en.close()
        for filename in cls.walk_files(DIRECTORY, '*.pgm'):
            #print filename
            faces.append(cls.prepare_image(filename))
            labels.append(label)
        model.update(np.asarray(faces), np.asarray(labels))
        #print model
        #Save model results
        model.save(TRAINING_FILE)
        print 'Training data saved to', TRAINING_FILE
        print "successfully updated"
        shutil.rmtree(foldername)
        return label

    #------------------------------------------------------------------------------------------
    @classmethod
    def Authenticate(cls):
        # Capture 30 face predictions, pick the most frequent label, and
        # grant access iff it is enrolled and the mean LBPH confidence
        # (distance; lower is better) is under 52.
        # Returns (1, label) on success, (0, -1) on failure.
        #load lookup table_ ky
        tableName = LOOKUP_FILE
        table = []
        samples = []
        #self.load_table(tableName,table,samples)
        # Create window
        cv2.namedWindow("Preview")
        #cv2.namedWindow("Compared")
        # Load training data into model
        print 'Loading training data...'
        model = cv2.createLBPHFaceRecognizer()
        model.load(TRAINING_FILE)
        print 'Training data loaded!'
        confidences = []
        labels = []
        camera = PiCamera()
        camera.resolution = (320, 240)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(320, 240))
        time.sleep(3)
        count = 30
        reccognition = 0
        print 'Looking for face...'
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        while rawCapture is not None:
            image = rawCapture.array
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            result = face.detect_single(gray)
            cv2.imshow("Preview", image)
            key = cv2.waitKey(1)
            if result is None:
                print "Please face to the camera "
            else:
                x, y, w, h = result
                # Crop and resize image to face
                crop = face.resize(face.crop(gray, x, y, w, h))
                label, confidence = model.predict(crop)
                confidences.append(confidence)
                labels.append(label)
                cv2.waitKey(1)
                count -= 1
                if count <= 0:
                    break
            rawCapture.truncate(0)
            camera.capture(rawCapture, format="bgr", use_video_port=True)
        print "finish capturing faces"
        camera.close()
        cv2.destroyWindow("Preview")
        # Manual de-duplication: `temp` collects the distinct labels seen,
        # `i` is how many distinct labels there are.
        temp = []
        i = 0
        length = len(labels)
        while length > 0:
            if i == 0:
                temp.append(labels[length-1])
                i += 1
                length -= 1
            else:
                tempi = 0
                while tempi < i:
                    if labels[length-1] != temp[tempi]:
                        tempi += 1
                    else:
                        length -= 1
                        break
                if tempi == i:
                    temp.append(labels[length-1])
                    i += 1
                    length -= 1
        print "------LABELS:{}".format(labels)
        print "------DIFFERENT LABELS:{}".format(temp)
        print "------NUMBER OF DIFFERENT LABELS:{}".format(i)
        tempi = 0
        numoflabel = 0
        if i > 5:
            # Too many distinct labels => unstable recognition; reject.
            print "could not enter"
            return 0, -1
        else:
            # Majority vote: find the label with the highest occurrence count.
            element = temp[tempi]
            while tempi < i:
                tempj = 0
                count = 0
                while tempj < len(labels):
                    if labels[tempj] == temp[tempi]:
                        count += 1
                    tempj += 1
                if count > numoflabel:
                    numoflabel = count
                    element = temp[tempi]
                tempi += 1
            print "element is {}, numoflabel is {}".format(element, numoflabel)
            # Mean confidence (distance) over the winning label's samples.
            tempi = 0
            con = 0
            while tempi < len(labels):
                if labels[tempi] == element:
                    con = con + confidences[tempi]
                tempi += 1
            ave = con / numoflabel
            print "mean of confidences is {}".format(ave)
            #print confidences
            # print recognition
            # Verify the winning label is actually enrolled.
            f = open(ENROLLMENT_FILE, 'r')
            s = f.readline()
            flag = 0
            while s != "":
                index = int(s)
                #print index
                if index == element:
                    flag = 1
                    print "flag TRUE"
                    break
                s = f.readline()
            if ave < 52 and flag == 1:
                print "authenticated"
                return 1, element
            else:
                print "could not enter"
                return 0, -1


class TextRedirector(object):
    # Redirects a stream's write() calls into a read-only Tk text widget
    # (used to show stdout/stderr inside the GUI).
    def __init__(self, widget, tag="stdout"):
        self.widget = widget
        self.tag = tag

    def write(self, str):
        # NOTE(review): parameter name `str` shadows the builtin; kept as-is.
        self.widget.configure(state="normal")
        self.widget.insert("end", str, (self.tag,))
        self.widget.see(tk.END)
        self.widget.update_idletasks()
        self.widget.configure(state="disabled")


# Start the GUI application.
app = BioLock()
app.mainloop()
import ast


def translate_call(env, func, argv):
    """Build a call-expression box; hoists pending statements from the
    callee box and every argument box, in that order."""
    pending = list(func.stmts)
    call_args = []
    for box in argv:
        pending.extend(box.stmts)
        call_args.append(box.expr)
    node = env.new_node(ast.Call, func.expr, call_args, [], None, None)
    return Box(node, pending)


# Could use for vargs later..
def translate_argument_expr(env, expr):
    """Arguments currently pass through unchanged."""
    return expr


def translate_stmt_expr(env, ebox):
    """Wrap an expression box as an expression statement."""
    return Box(env.new_node(ast.Expr, ebox.expr), ebox.stmts)


def translate_expr_symbol(env, name):
    """Symbol -> expression: '' means None, a leading digit means a numeric
    literal (float if it contains '.'), anything else is a name load."""
    if name == '':
        return Box(env.none())
    if name[:1].isdigit():
        node_value = float(name) if '.' in name else int(name)
        return Box(env.new_node(ast.Num, node_value))
    return Box(env.new_node(ast.Name, as_python_sym(name), ast.Load()))


def translate_lxpr_symbol(env, name):
    """Symbol in assignment-target position -> name store."""
    return Box(env.new_node(ast.Name, as_python_sym(name), ast.Store()))


def translate_expr_string(env, string):
    """String literal -> Str node box."""
    return Box(env.new_node(ast.Str, string))


def translate_return(env, ebox):
    """Return statement carrying its operand's pending statements."""
    return Box(env.new_node(ast.Return, ebox.expr), ebox.stmts)


def translate_import(env, *aliases):
    """`import a, b, ...` from alias nodes."""
    return Box(env.new_node(ast.Import, list(aliases)))


def translate_import_from(env, name, aliases):
    """`from <name> import ...` (level 0, i.e. absolute)."""
    return Box(env.new_node(ast.ImportFrom, as_python_sym(name), aliases, 0))


def translate_as(env, name, alias):
    """`<name> as <alias>` alias node."""
    return env.new_node(ast.alias, as_python_sym(name), as_python_sym(alias))


def translate_alias_symbol(env, name):
    """Plain (un-renamed) alias node."""
    return env.new_node(ast.alias, as_python_sym(name), None)


def translate_let(env, lbox, ebox):
    """Assignment: evaluate the value box, then the target box, then assign."""
    pending = list(ebox.stmts) + list(lbox.stmts)
    pending.append(env.new_node(ast.Assign, [lbox.expr], ebox.expr))
    # This is wrong return value for let, but I will fix it later.
    return Box(env.none(), pending)


def translate_function(env, args, body):
    """Anonymous function: emit a FunctionDef under a generated name and
    return a name-load of it as the expression."""
    fn_name = env.new_sym()
    fn_body = []
    for expr in body:
        fn_body.extend(env.statementify(expr))
    fn_body.append(env.new_node(ast.Return, env.none()))
    params = [env.new_node(ast.Name, as_python_sym(a), ast.Param())
              for a in args]
    fn_def = env.new_node(ast.FunctionDef, fn_name,
                          ast.arguments(params, None, None, []),
                          fn_body, [])
    return Box(env.new_node(ast.Name, fn_name, ast.Load()), [fn_def])


def translate_list(env, *eboxes):
    """List display from element boxes."""
    collected = []
    elements = []
    for box in eboxes:
        collected += list(box.stmts)
        elements.append(box.expr)
    return Box(env.new_node(ast.List, elements, ast.Load()), collected)


def translate_tuple(env, *eboxes):
    """Tuple display from element boxes."""
    collected = []
    elements = []
    for box in eboxes:
        collected += list(box.stmts)
        elements.append(box.expr)
    return Box(env.new_node(ast.Tuple, elements, ast.Load()), collected)


def translate_dict(env, *pairs):
    """Dict display from (key box, value box) pairs."""
    collected = []
    key_nodes = []
    value_nodes = []
    for kbox, vbox in pairs:
        collected += list(kbox.stmts)
        key_nodes.append(kbox.expr)
        collected += list(vbox.stmts)
        value_nodes.append(vbox.expr)
    return Box(env.new_node(ast.Dict, key_nodes, value_nodes), collected)


def as_python_sym(name):
    """Encode a source-level symbol to the byte form this (Python 2)
    translator feeds into ast identifiers."""
    return name.encode('utf-8')

# NOTE: a block of commented-out legacy @semantic handlers and operator
# tables followed here; removed as dead code.
# NOTE: a large region of commented-out legacy code (comparison/unary/binary
# operator tables, @semantic handlers, dom-based error reporting, and a
# __main__ driver) was removed here as dead code; recover it from version
# control if it is ever needed again.


class Box(object):
    """Pairs a translated expression with statements that must run first.

    `expr` is an AST expression node; `stmts` is a sequence of AST statement
    nodes to emit before `expr` can be evaluated.  The default is an empty
    tuple — immutable, so the no-statements case is safe to share between
    instances.
    """

    def __init__(self, expr, stmts=()):
        self.expr = expr
        self.stmts = stmts
# Copyright 2012 Viewfinder Inc. All Rights Reserved.

"""Secrets test.

Test secrets module. user vs shared, encrypted vs plain.
"""

__author__ = 'marc@emailscrubbed.com (Marc Berhault)'

import getpass
import json
import logging
import mock
import os
import shutil
import tempfile
import unittest

from tornado import options
from viewfinder.backend.base import ami_metadata, base_options, secrets, testing
from viewfinder.backend.base.exceptions import CannotReadEncryptedSecretError


class SecretsTestCase(unittest.TestCase):
  def setUp(self):
    # Fake out the keyring to None for the entire test.
    self._prev_keyring = secrets.keyring
    secrets.keyring = None

    # Save flag values so tearDown can restore them.
    self._domain = options.options.domain
    self._prev_user_dir = options.options.user_secrets_dir
    self._prev_shared_dir = options.options.secrets_dir
    self._prev_devbox = options.options.devbox

    # Create tmp directories and set flag values.
    self._user_dir = tempfile.mkdtemp()
    options.options.user_secrets_dir = self._user_dir
    os.mkdir(os.path.join(self._user_dir, self._domain))

    self._shared_dir = tempfile.mkdtemp()
    options.options.secrets_dir = self._shared_dir
    os.mkdir(os.path.join(self._shared_dir, self._domain))

  def tearDown(self):
    # Recursively delete temp directories and restore flag values.
    shutil.rmtree(self._user_dir)
    shutil.rmtree(self._shared_dir)
    options.options.user_secrets_dir = self._prev_user_dir
    options.options.secrets_dir = self._prev_shared_dir
    options.options.devbox = self._prev_devbox
    secrets.keyring = self._prev_keyring
    # Reset module-level singletons so later tests start clean.
    secrets._user_secrets_manager = None
    secrets._shared_secrets_manager = None

  def testNoDomainDir(self):
    """Test secrets manager without a domain dir."""
    mgr = secrets.SecretsManager('test', 'fake_domain', self._shared_dir)
    # We do not fail on Init since we want to be able to support non-existent user secrets.
    mgr.Init()
    # Behaves just like an empty secrets manager.
    self.assertEqual(len(mgr.ListSecrets()), 0)
    # Trying to add a secret fails.
    self.assertRaises(IOError, mgr.PutSecret, 'foo', 'codeforfoo')

  def testPlain(self):
    """Test secrets manager with plain-text secrets."""
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    # Empty directory, Init will not require a passphrase.
    mgr.Init()
    self.assertEqual(len(mgr.ListSecrets()), 0)
    self.assertRaises(KeyError, mgr.GetSecret, 'foo')
    self.assertFalse(mgr.HasSecret('foo'))

    # Put a secret, but underlying directory doesn't exist (switch domains first).
    mgr.PutSecret('foo', 'codeforfoo')
    self.assertTrue(mgr.HasSecret('foo'))
    self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
    self.assertEqual(len(mgr.ListSecrets()), 1)
    # Now check that the underlying file exists.
    with open(os.path.join(self._shared_dir, self._domain, 'foo')) as f:
      self.assertEqual(f.read(), 'codeforfoo')

    # Overwrite secret.
    mgr.PutSecret('foo', 'newcodeforfoo')
    self.assertEqual(mgr.GetSecret('foo'), 'newcodeforfoo')
    self.assertEqual(len(mgr.ListSecrets()), 1)
    # Now check that the underlying file exists.
    with open(os.path.join(self._shared_dir, self._domain, 'foo')) as f:
      self.assertEqual(f.read(), 'newcodeforfoo')

    # Create a new secrets manager.
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    mgr.Init()
    self.assertTrue(mgr.HasSecret('foo'))
    self.assertEqual(mgr.GetSecret('foo'), 'newcodeforfoo')
    self.assertEqual(len(mgr.ListSecrets()), 1)

    # Passing a passphrase as a flag does not impact plain-text secrets.
    options.options.passphrase = 'not a passphrase'
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    mgr.Init()
    self.assertEqual(mgr.GetSecret('foo'), 'newcodeforfoo')

  def testEncrypted(self):
    """Test secrets manager with encrypted secrets."""
    # The only way to make a secret manager encrypt when empty is to ask it
    # to prompt for a passphrase. It does so using getpass.getpass.
    passphrase = 'my voice is my passport!'
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
      getpass.return_value = passphrase
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      mgr.Init(should_prompt=True)

      # Secret will be encrypted.
      mgr.PutSecret('foo', 'codeforfoo')
      self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')
      with open(os.path.join(self._shared_dir, self._domain, 'foo')) as f:
        contents = f.read()
        self.assertNotEqual(contents, 'codeforfoo')
        (cipher, ciphertext) = json.loads(contents)
        self.assertEqual(cipher, 'AES')
        # TODO(marc): maybe we should test the encryption itself.

    # Now create a new secrets manager. We do not ask it to prompt, it will figure it out
    # all by itself. It does this in a number of ways:
    ##################### --devbox=False ########################
    options.options.devbox = False
    # Set stdin to raise an exception, just to make sure we're not using it.
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
      getpass.side_effect = Exception('you should not be using stdin in --devbox=False mode')

      # Uses --passphrase if specified.
      options.options.passphrase = passphrase
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      mgr.Init()
      self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')

      # We get an assertion error when a passphrase is supplied but bad. This is because it fails on sha sum.
      options.options.passphrase = 'bad passphrase'
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      self.assertRaises(AssertionError, mgr.Init)

      # Uses AMI metadata otherwise.
      options.options.passphrase = None
      # No AMI fetched, or passphrase not one of the fetched fields.
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      self.assertRaisesRegexp(CannotReadEncryptedSecretError,
                              'failed to fetch passphrase from AWS instance metadata',
                              mgr.Init)

      # Good passphrase from AMI metadata.
      ami_metadata.SetAMIMetadata({'user-data/passphrase': passphrase})
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      mgr.Init()
      self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')

      # Bad passphrase from AMI metadata.
      ami_metadata.SetAMIMetadata({'user-data/passphrase': 'not a good passphrase.'})
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      self.assertRaises(AssertionError, mgr.Init)

    ##################### --devbox=True ########################
    options.options.devbox = True
    # Set bad AMI metadata just to show that we never use it.
    ami_metadata.SetAMIMetadata({'user-data/passphrase': 'not a good passphrase.'})

    # Uses --passphrase if specified.
    options.options.passphrase = passphrase
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    mgr.Init()
    self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')

    # If --passphrase is None and we cannot prompt, we have no way of getting the passphrase.
    options.options.passphrase = None
    mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
    self.assertRaisesRegexp(CannotReadEncryptedSecretError,
                            'passphrase is required but was not provided',
                            mgr.Init, can_prompt=False)

    # Passphrase is read from stdin if prompting is allowed.
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
      getpass.return_value = passphrase
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      mgr.Init()
      self.assertEqual(mgr.GetSecret('foo'), 'codeforfoo')

    # Pass a bad passphrase on stdin.
    with mock.patch.object(secrets.getpass, 'getpass') as getpass:
      getpass.return_value = 'not a good passphrase'
      mgr = secrets.SecretsManager('test', self._domain, self._shared_dir)
      self.assertRaises(AssertionError, mgr.Init)

  def testMultipleManagers(self):
    """Test the secrets managers in their natural habitat: automatic selection of user vs shared
    based on flags."""
    # these may not be None if we've been running other tests using run-tests.
    secrets._user_secrets_manager = None
    secrets._shared_secrets_manager = None

    # Devbox mode: init user secrets, and lazily init shared secrets if requesting a secret not in user secrets.
    options.options.devbox = True
    secrets.InitSecrets()
    self.assertIsNotNone(secrets._user_secrets_manager)
    self.assertIsNone(secrets._shared_secrets_manager)

    # Request a secret contained in user secrets: shared secrets remain uninitialized.
    secrets._user_secrets_manager.PutSecret('foo', 'codeforfoo')
    self.assertEqual(secrets.GetSecret('foo'), 'codeforfoo')
    self.assertIsNotNone(secrets._user_secrets_manager)
    self.assertIsNone(secrets._shared_secrets_manager)

    # Request a secret not contained anywhere. As soon as we notice that it's not in user secrets, we initialize
    # the shared secrets and look there, which fails.
    self.assertRaises(KeyError, secrets.GetSecret, 'bar')
    self.assertIsNotNone(secrets._user_secrets_manager)
    self.assertIsNotNone(secrets._shared_secrets_manager)

    # Non-devbox mode: user secrets are never used. shared secrets are initialized right away.
    options.options.devbox = False
    secrets._user_secrets_manager = None
    secrets._shared_secrets_manager = None
    secrets.InitSecrets()
    self.assertIsNone(secrets._user_secrets_manager)
    self.assertIsNotNone(secrets._shared_secrets_manager)

    # Lookup whatever we want, we still won't use the user secrets.
    secrets._shared_secrets_manager.PutSecret('foo', 'codeforfoo')
    self.assertEqual(secrets.GetSecret('foo'), 'codeforfoo')
    self.assertRaises(KeyError, secrets.GetSecret, 'bar')
    self.assertIsNone(secrets._user_secrets_manager)
    self.assertIsNotNone(secrets._shared_secrets_manager)
#!/usr/bin/env python

import luigi
import os
from os.path import join as pjoin, exists, dirname
import cPickle as pickle
import glob
import argparse
import logging
import pandas
import rasterio

from datacube.api.model import DatasetType
from classifier import classifier
from zonal_stats import zonal_stats
from zonal_stats import zonal_class_distribution
from image_processing.segmentation import rasterise_vector


CONFIG = luigi.configuration.get_config()
CONFIG.add_config_path(pjoin(dirname(__file__), 'config.cfg'))


class RasteriseTask(luigi.Task):
    """Rasterise the configured vector file onto a single cell's image grid."""

    out_dir = luigi.Parameter()

    def requires(self):
        # No upstream dependencies.
        return []

    def output(self):
        return luigi.LocalTarget(pjoin(self.out_dir, 'RasteriseTask.completed'))

    def run(self):
        raster_fname = pjoin(self.out_dir,
                             CONFIG.get('outputs', 'rasterise_filename'))
        query_fname = pjoin(self.out_dir,
                            CONFIG.get('outputs', 'query_filename'))

        with open(query_fname, 'r') as infile:
            datasets = pickle.load(infile)

        vector_fname = CONFIG.get('work', 'vector_filename')

        # Borrow the grid definition (CRS, transform, shape) from the first
        # FC25 image of the cell.
        img_fname = datasets[0].datasets[DatasetType.FC25].path
        with rasterio.open(img_fname) as src:
            crs = src.crs
            transform = src.affine
            height = src.height
            width = src.width

        res = rasterise_vector(vector_fname, shape=(height, width),
                               transform=transform, crs=crs)

        kwargs = {'count': 1,
                  'width': width,
                  'height': height,
                  'crs': crs,
                  'transform': transform,
                  'dtype': res.dtype.name,
                  'nodata': 0}

        with rasterio.open(raster_fname, 'w', **kwargs) as dst:
            dst.write(1, res)

        # We could just set the image as the Luigi completion target...
        with self.output().open('w') as outf:
            outf.write('Complete')


class ClassifierStatsTask(luigi.Task):
    """Compute the zonal class distribution for one dataset (time slice)."""

    idx = luigi.IntParameter()
    out_fname = luigi.Parameter()

    def requires(self):
        return [RasteriseTask(dirname(self.out_fname))]

    def output(self):
        return luigi.LocalTarget(self.out_fname)

    def run(self):
        work_dir = dirname(self.out_fname)
        rasterised_fname = pjoin(work_dir,
                                 CONFIG.get('outputs', 'rasterise_filename'))
        query_fname = pjoin(work_dir, CONFIG.get('outputs', 'query_filename'))

        with open(query_fname, 'r') as infile:
            datasets = pickle.load(infile)

        dataset = datasets[self.idx]
        nbar_ds = dataset.datasets[DatasetType.ARG25]
        pq_ds = dataset.datasets[DatasetType.PQ25]

        classified_img = classifier(nbar_ds, pq_ds)

        # hard code; as this will be short lived due to agdc-v2 development
        class_ids = [0, 1, 2, 3, 4, 5]

        with rasterio.open(rasterised_fname, 'r') as src:
            zones_img = src.read(1)

        result = zonal_class_distribution(classified_img, zones_img,
                                          class_ids=class_ids)

        # Tag the result with the acquisition time of this slice.
        result['Timestamp'] = dataset.start_datetime

        # Append the dataframe to the output HDF5 store, then flush to disk.
        store = pandas.HDFStore(self.output().path)
        store.append('data', result)
        store.close()


class CellStatsTask(luigi.Task):
    """Fan out a ClassifierStatsTask for every dataset in a cell."""

    out_dir = luigi.Parameter()

    def requires(self):
        fname_format = pjoin(self.out_dir,
                             CONFIG.get('outputs', 'stats_filename_format'))
        query_fname = pjoin(self.out_dir,
                            CONFIG.get('outputs', 'query_filename'))

        with open(query_fname, 'r') as infile:
            datasets = pickle.load(infile)

        targets = []
        for idx, ds in enumerate(datasets):
            # Timestamps become filename-safe by replacing spaces.
            timestamp = bytes(ds.start_datetime).replace(' ', '-')
            targets.append(ClassifierStatsTask(idx,
                                               fname_format.format(timestamp)))
        return targets

    def output(self):
        return luigi.LocalTarget(pjoin(self.out_dir, 'CellStatsTask.completed'))

    def run(self):
        with self.output().open('w') as outf:
            outf.write('Completed')


class CombineCellStatsTask(luigi.Task):
    """Merge every per-timeslice stats file for a cell into one HDF5 file."""

    out_dir = luigi.Parameter()

    def requires(self):
        return [CellStatsTask(self.out_dir)]

    def output(self):
        return luigi.LocalTarget(pjoin(self.out_dir,
                                       'CombineCellStatsTask.completed'))

    def run(self):
        # Get a list of the stats files for each timeslice.
        stats_files_list = glob.glob(pjoin(self.out_dir, '*.h5'))

        # Create an output file that we can continually append data to.
        out_fname = pjoin(self.out_dir,
                          CONFIG.get('outputs',
                                     'combined_cell_stats_filename'))
        combined_store = pandas.HDFStore(out_fname)

        # If there is nothing in the first file there will be nothing for
        # every file.
        store = pandas.HDFStore(stats_files_list[0])
        if '/data' in store.keys():
            # We have data to retrieve.
            headings = store['data'].columns.tolist()
            store.close()

            df = pandas.DataFrame(columns=headings)
            for sfile in stats_files_list:
                store = pandas.HDFStore(sfile, 'r')
                df = df.append(store['data'])
                store.close()
            df.reset_index(inplace=True)

            # Write to disk.
            combined_store.append('data', df)

        with self.output().open('w') as outf:
            outf.write('Completed')


class RunCombineCellStatsTasks(luigi.Task):
    """Issue a CombineCellStatsTask for each cell in the [idx1, idx2) slice
    of the cells list."""

    idx1 = luigi.IntParameter()
    idx2 = luigi.IntParameter()

    def requires(self):
        base_out_dir = CONFIG.get('work', 'output_directory')
        cells_list_fname = pjoin(base_out_dir,
                                 CONFIG.get('outputs', 'cells_list'))
        with open(cells_list_fname, 'r') as infile:
            cells = pickle.load(infile)

        return [CombineCellStatsTask(pjoin(base_out_dir, cell))
                for cell in cells[self.idx1:self.idx2]]

    def output(self):
        out_dir = CONFIG.get('work', 'output_directory')
        target = pjoin(out_dir, 'RunCombineCellStatsTasks_{}:{}.completed')
        return luigi.LocalTarget(target.format(self.idx1, self.idx2))

    def run(self):
        with self.output().open('w') as outf:
            outf.write('Completed')


if __name__ == '__main__':
    # Command-line arguments.
    desc = "Processes zonal stats for a given set of cells."
    hlp = ("The tile/chunk index to retieve from the tiles list. "
           "(Needs to have been previously computed to a file named tiles.pkl")
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--tile', type=int, help=hlp)
    parsed_args = parser.parse_args()
    tile_idx = parsed_args.tile

    # Logging setup.
    log_dir = CONFIG.get('work', 'logs_directory')
    if not exists(log_dir):
        os.makedirs(log_dir)

    logfile = "{log_path}/stats_workflow_{uname}_{pid}.log"
    logfile = logfile.format(log_path=log_dir, uname=os.uname()[1],
                             pid=os.getpid())
    logging_level = logging.INFO
    logging.basicConfig(filename=logfile, level=logging_level,
                        format=("%(asctime)s: [%(name)s] (%(levelname)s) "
                                "%(message)s "))

    # Get the list of tiles (groups of cells that each node will operate on.
    tiles_list_fname = pjoin(CONFIG.get('work', 'output_directory'),
                             CONFIG.get('outputs', 'tiles_list'))
    with open(tiles_list_fname, 'r') as in_file:
        tiles = pickle.load(in_file)

    # Initialise and run the job for this tile.
    tile = tiles[tile_idx]
    tasks = [RunCombineCellStatsTasks(tile[0], tile[1])]
    luigi.build(tasks, local_scheduler=True, workers=16)
    luigi.run()
# -*- coding: utf-8 -*-
from __future__ import with_statement

import warnings

from almost import Approximate
from pytest import deprecated_call, raises

from conftest import various_backends
from trueskill import *


inf = float('inf')
nan = float('nan')


class almost(Approximate):
    # Approximate comparison helper that understands Rating objects and
    # (nested) lists of them by normalizing everything to plain tuples.

    def normalize(self, value):
        if isinstance(value, Rating):
            return self.normalize(tuple(value))
        elif isinstance(value, list):
            try:
                if isinstance(value[0][0], Rating):
                    # flatten transformed ratings
                    return list(sum(value, ()))
            except (TypeError, IndexError):
                pass
        return super(almost, self).normalize(value)

    @classmethod
    def wrap(cls, f, *args, **kwargs):
        # Wrap a function so its return value is compared approximately.
        return lambda *a, **k: cls(f(*a, **k), *args, **kwargs)


# Approximate-comparing variants of the public API.
_rate = almost.wrap(rate)
_rate_1vs1 = almost.wrap(rate_1vs1)
_quality = almost.wrap(quality)
_quality_1vs1 = almost.wrap(quality_1vs1)


# usage

def test_compatibility_with_another_rating_systems():
    """All rating system modules should implement ``rate_1vs1`` and
    ``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
    """
    r1, r2 = Rating(30, 3), Rating(20, 2)
    assert quality_1vs1(r1, r2) == quality([(r1,), (r2,)])
    rated = rate([(r1,), (r2,)])
    assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
    rated = rate([(r1,), (r2,)], [0, 0])
    assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])


def test_compare_ratings():
    # Ratings order by mu.
    assert Rating(1, 2) == Rating(1, 2)
    assert Rating(1, 2) != Rating(1, 3)
    assert Rating(2, 2) > Rating(1, 2)
    assert Rating(3, 2) >= Rating(1, 2)
    assert Rating(0, 2) < Rating(1, 2)
    assert Rating(-1, 2) <= Rating(1, 2)


def test_rating_to_number():
    # Numeric conversions expose mu.
    assert int(Rating(1, 2)) == 1
    assert float(Rating(1.1, 2)) == 1.1
    assert complex(Rating(1.2, 2)) == 1.2 + 0j
    try:
        assert long(Rating(1, 2)) == long(1)
    except NameError:
        # Python 3 doesn't have `long` anymore
        pass


def test_unsorted_groups():
    t1, t2, t3 = generate_teams([1, 1, 1])
    rated = rate([t1, t2, t3], [2, 1, 0])
    assert almost(rated) == \
        [(18.325, 6.656), (25.000, 6.208), (31.675, 6.656)]


def test_custom_environment():
    env = TrueSkill(draw_probability=.50)
    t1, t2 = generate_teams([1, 1], env=env)
    rated = env.rate([t1, t2])
    assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]


def test_setup_global_environment():
    try:
        setup(draw_probability=.50)
        t1, t2 = generate_teams([1, 1])
        rated = rate([t1, t2])
        assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
    finally:
        # rollback
        setup()


def test_invalid_rating_groups():
    env = TrueSkill()
    with raises(ValueError):
        env.validate_rating_groups([])
    with raises(ValueError):
        # need multiple groups not just one
        env.validate_rating_groups([()])
    with raises(ValueError):
        # empty group is not allowed
        env.validate_rating_groups([(Rating(),)])
    with raises(ValueError):
        # all groups should be same structure
        env.validate_rating_groups([(Rating(),), ()])
    with raises(TypeError):
        env.validate_rating_groups([(Rating(),), {0: Rating()}])


def test_deprecated_methods():
    env = TrueSkill()
    r1, r2, r3 = Rating(), Rating(), Rating()
    deprecated_call(transform_ratings, [(r1,), (r2,), (r3,)])
    deprecated_call(match_quality, [(r1,), (r2,), (r3,)])
    deprecated_call(env.Rating)
    deprecated_call(env.transform_ratings, [(r1,), (r2,), (r3,)])
    deprecated_call(env.match_quality, [(r1,), (r2,), (r3,)])
    deprecated_call(env.rate_1vs1, r1, r2)
    deprecated_call(env.quality_1vs1, r1, r2)
    deprecated_call(lambda: Rating().exposure)
    dyn = TrueSkill(draw_probability=dynamic_draw_probability)
    deprecated_call(dyn.rate, [(r1,), (r2,)])


def test_deprecated_individual_rating_groups():
    # Bare Ratings (not wrapped in a group) are deprecated but supported by
    # the legacy transform_ratings/match_quality entry points.
    r1, r2, r3 = Rating(50, 1), Rating(10, 5), Rating(15, 5)
    with raises(TypeError):
        deprecated_call(rate, [r1, r2, r3])
    with raises(TypeError):
        deprecated_call(quality, [r1, r2, r3])
    assert transform_ratings([r1, r2, r3]) == rate([(r1,), (r2,), (r3,)])
    assert match_quality([r1, r2, r3]) == quality([(r1,), (r2,), (r3,)])
    deprecated_call(transform_ratings, [r1, r2, r3])
    deprecated_call(match_quality, [r1, r2, r3])


def test_rating_tuples():
    r1, r2, r3 = Rating(), Rating(), Rating()
    rated = rate([(r1, r2), (r3,)])
    assert len(rated) == 2
    assert isinstance(rated[0], tuple)
    assert isinstance(rated[1], tuple)
    assert len(rated[0]) == 2
    assert len(rated[1]) == 1
    assert isinstance(rated[0][0], Rating)


def test_rating_dicts():
    # Dict groups map arbitrary keys (players) to ratings and come back as
    # dicts keyed the same way.
    class Player(object):
        def __init__(self, name, rating, team):
            self.name = name
            self.rating = rating
            self.team = team
    p1 = Player('Player A', Rating(), 0)
    p2 = Player('Player B', Rating(), 0)
    p3 = Player('Player C', Rating(), 1)
    rated = rate([{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}])
    assert len(rated) == 2
    assert isinstance(rated[0], dict)
    assert isinstance(rated[1], dict)
    assert len(rated[0]) == 2
    assert len(rated[1]) == 1
    assert p1 in rated[0]
    assert p2 in rated[0]
    assert p3 in rated[1]
    assert p1 not in rated[1]
    assert p2 not in rated[1]
    assert p3 not in rated[0]
    assert isinstance(rated[0][p1], Rating)
    p1.rating = rated[p1.team][p1]
    p2.rating = rated[p2.team][p2]
    p3.rating = rated[p3.team][p3]


def test_dont_use_0_for_min_delta():
    with raises(ValueError):
        rate([(Rating(),), (Rating(),)], min_delta=0)


def test_list_instead_of_tuple():
    r1, r2 = Rating(), Rating()
    assert rate([[r1], [r2]]) == rate([(r1,), (r2,)])
    assert quality([[r1], [r2]]) == quality([(r1,), (r2,)])


def test_backend():
    env = TrueSkill(backend=(NotImplemented, NotImplemented, NotImplemented))
    with raises(TypeError):
        env.rate_1vs1(Rating(), Rating())
    with raises(ValueError):
        # '__not_defined__' backend is not defined
        TrueSkill(backend='__not_defined__')


# algorithm

def generate_teams(sizes, env=None):
    # Build rating groups of the given team sizes using default ratings.
    rating_cls = Rating if env is None else env.create_rating
    rating_groups = []
    for size in sizes:
        ratings = []
        for x in range(size):
            ratings.append(rating_cls())
        rating_groups.append(tuple(ratings))
    return rating_groups


def generate_individual(size, env=None):
    # `size` solo players, one per team.
    return generate_teams([1] * size, env=env)


@various_backends
def test_n_vs_n():
    # 1 vs 1
    t1, t2 = generate_teams([1, 1])
    assert _quality([t1, t2]) == 0.447
    assert _rate([t1, t2]) == [(29.396, 7.171), (20.604, 7.171)]
    assert _rate([t1, t2], [0, 0]) == [(25.000, 6.458), (25.000, 6.458)]
    # 2 vs 2
    t1, t2 = generate_teams([2, 2])
    assert _quality([t1, t2]) == 0.447
    assert _rate([t1, t2]) == \
        [(28.108, 7.774), (28.108, 7.774), (21.892, 7.774), (21.892, 7.774)]
    assert _rate([t1, t2], [0, 0]) == \
        [(25.000, 7.455), (25.000, 7.455), (25.000, 7.455), (25.000, 7.455)]
    # 4 vs 4
    t1, t2 = generate_teams([4, 4])
    assert _quality([t1, t2]) == 0.447
    assert _rate([t1, t2]) == \
        [(27.198, 8.059), (27.198, 8.059), (27.198, 8.059), (27.198, 8.059),
         (22.802, 8.059), (22.802, 8.059), (22.802, 8.059), (22.802, 8.059)]


@various_backends
def test_1_vs_n():
    t1, = generate_teams([1])
    # 1 vs 2
    t2, = generate_teams([2])
    assert _quality([t1, t2]) == 0.135
    assert _rate([t1, t2]) == \
        [(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
    assert _rate([t1, t2], [0, 0]) == \
        [(31.660, 7.138), (18.340, 7.138), (18.340, 7.138)]
    # 1 vs 3
    t2, = generate_teams([3])
    assert _quality([t1, t2]) == 0.012
    assert _rate([t1, t2]) == \
        [(36.337, 7.527), (13.663, 7.527),
(13.663, 7.527), (13.663, 7.527)] assert almost(rate([t1, t2], [0, 0]), 2) == \ [(34.990, 7.455), (15.010, 7.455), (15.010, 7.455), (15.010, 7.455)] # 1 vs 7 t2, = generate_teams([7]) assert _quality([t1, t2]) == 0 assert _rate([t1, t2]) == \ [(40.582, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917)] @various_backends def test_individual(): # 3 players players = generate_individual(3) assert _quality(players) == 0.200 assert _rate(players) == \ [(31.675, 6.656), (25.000, 6.208), (18.325, 6.656)] assert _rate(players, [0] * 3) == \ [(25.000, 5.698), (25.000, 5.695), (25.000, 5.698)] # 4 players players = generate_individual(4) assert _quality(players) == 0.089 assert _rate(players) == \ [(33.207, 6.348), (27.401, 5.787), (22.599, 5.787), (16.793, 6.348)] # 5 players players = generate_individual(5) assert _quality(players) == 0.040 assert _rate(players) == \ [(34.363, 6.136), (29.058, 5.536), (25.000, 5.420), (20.942, 5.536), (15.637, 6.136)] # 8 players players = generate_individual(8) assert _quality(players) == 0.004 assert _rate(players, [0] * 8) == \ [(25.000, 4.592), (25.000, 4.583), (25.000, 4.576), (25.000, 4.573), (25.000, 4.573), (25.000, 4.576), (25.000, 4.583), (25.000, 4.592)] # 16 players players = generate_individual(16) assert _rate(players) == \ [(40.539, 5.276), (36.810, 4.711), (34.347, 4.524), (32.336, 4.433), (30.550, 4.380), (28.893, 4.349), (27.310, 4.330), (25.766, 4.322), (24.234, 4.322), (22.690, 4.330), (21.107, 4.349), (19.450, 4.380), (17.664, 4.433), (15.653, 4.524), (13.190, 4.711), (9.461, 5.276)] @various_backends def test_multiple_teams(): # 2 vs 4 vs 2 t1 = (Rating(40, 4), Rating(45, 3)) t2 = (Rating(20, 7), Rating(19, 6), Rating(30, 9), Rating(10, 4)) t3 = (Rating(50, 5), Rating(30, 2)) assert _quality([t1, t2, t3]) == 0.367 assert _rate([t1, t2, t3], [0, 1, 1]) == \ [(40.877, 3.840), (45.493, 2.934), (19.609, 6.396), (18.712, 5.625), (29.353, 7.673), (9.872, 
3.891), (48.830, 4.590), (29.813, 1.976)] # 1 vs 2 vs 1 t1 = (Rating(),) t2 = (Rating(), Rating()) t3 = (Rating(),) assert _quality([t1, t2, t3]) == 0.047 @various_backends def test_upset(): # 1 vs 1 t1, t2 = (Rating(),), (Rating(50, 12.5),) assert _quality([t1, t2]) == 0.110 assert _rate([t1, t2], [0, 0]) == [(31.662, 7.137), (35.010, 7.910)] # 2 vs 2 t1 = (Rating(20, 8), Rating(25, 6)) t2 = (Rating(35, 7), Rating(40, 5)) assert _quality([t1, t2]) == 0.084 assert _rate([t1, t2]) == \ [(29.698, 7.008), (30.455, 5.594), (27.575, 6.346), (36.211, 4.768)] # 3 vs 2 t1 = (Rating(28, 7), Rating(27, 6), Rating(26, 5)) t2 = (Rating(30, 4), Rating(31, 3)) assert _quality([t1, t2]) == 0.254 assert _rate([t1, t2], [0, 1]) == \ [(28.658, 6.770), (27.484, 5.856), (26.336, 4.917), (29.785, 3.958), (30.879, 2.983)] assert _rate([t1, t2], [1, 0]) == \ [(21.840, 6.314), (22.474, 5.575), (22.857, 4.757), (32.012, 3.877), (32.132, 2.949)] # 8 players players = [(Rating(10, 8),), (Rating(15, 7),), (Rating(20, 6),), (Rating(25, 5),), (Rating(30, 4),), (Rating(35, 3),), (Rating(40, 2),), (Rating(45, 1),)] assert _quality(players) == 0.000 assert _rate(players) == \ [(35.135, 4.506), (32.585, 4.037), (31.329, 3.756), (30.984, 3.453), (31.751, 3.064), (34.051, 2.541), (38.263, 1.849), (44.118, 0.983)] @various_backends def test_partial_play(): t1, t2 = (Rating(),), (Rating(), Rating()) # each results from C# Skills: assert rate([t1, t2], weights=[(1,), (1, 1)]) == rate([t1, t2]) assert _rate([t1, t2], weights=[(1,), (1, 1)]) == \ [(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)] assert _rate([t1, t2], weights=[(0.5,), (0.5, 0.5)]) == \ [(33.939, 7.312), (16.061, 7.312), (16.061, 7.312)] assert _rate([t1, t2], weights=[(1,), (0, 1)]) == \ [(29.440, 7.166), (25.000, 8.333), (20.560, 7.166)] assert _rate([t1, t2], weights=[(1,), (0.5, 1)]) == \ [(32.417, 7.056), (21.291, 8.033), (17.583, 7.056)] # match quality of partial play t1, t2, t3 = (Rating(),), (Rating(), Rating()), (Rating(),) 
assert _quality([t1, t2, t3], [(1,), (0.25, 0.75), (1,)]) == 0.2 assert _quality([t1, t2, t3], [(1,), (0.8, 0.9), (1,)]) == 0.0809 @various_backends def test_partial_play_with_weights_dict(): t1, t2 = (Rating(),), (Rating(), Rating()) assert rate([t1, t2], weights={(0, 0): 0.5, (1, 0): 0.5, (1, 1): 0.5}) == \ rate([t1, t2], weights=[[0.5], [0.5, 0.5]]) assert rate([t1, t2], weights={(1, 0): 0}) == \ rate([t1, t2], weights=[[1], [0, 1]]) assert rate([t1, t2], weights={(1, 0): 0.5}) == \ rate([t1, t2], weights=[[1], [0.5, 1]]) @various_backends def test_microsoft_research_example(): # http://research.microsoft.com/en-us/projects/trueskill/details.aspx alice, bob, chris, darren, eve, fabien, george, hillary = \ Rating(), Rating(), Rating(), Rating(), \ Rating(), Rating(), Rating(), Rating() _rated = rate([{'alice': alice}, {'bob': bob}, {'chris': chris}, {'darren': darren}, {'eve': eve}, {'fabien': fabien}, {'george': george}, {'hillary': hillary}]) rated = {} list(map(rated.update, _rated)) assert almost(rated['alice']) == (36.771, 5.749) assert almost(rated['bob']) == (32.242, 5.133) assert almost(rated['chris']) == (29.074, 4.943) assert almost(rated['darren']) == (26.322, 4.874) assert almost(rated['eve']) == (23.678, 4.874) assert almost(rated['fabien']) == (20.926, 4.943) assert almost(rated['george']) == (17.758, 5.133) assert almost(rated['hillary']) == (13.229, 5.749) @various_backends def test_dynamic_draw_probability(): from trueskillhelpers import calc_dynamic_draw_probability as calc def assert_predictable_draw_probability(r1, r2, drawn=False): dyn = TrueSkill(draw_probability=dynamic_draw_probability) sta = TrueSkill(draw_probability=calc((r1,), (r2,), dyn)) assert dyn.rate_1vs1(r1, r2, drawn)== sta.rate_1vs1(r1, r2, drawn) assert_predictable_draw_probability(Rating(100), Rating(10)) assert_predictable_draw_probability(Rating(10), Rating(100)) assert_predictable_draw_probability(Rating(10), Rating(100), drawn=True) 
assert_predictable_draw_probability(Rating(25), Rating(25)) assert_predictable_draw_probability(Rating(25), Rating(25), drawn=True) assert_predictable_draw_probability(Rating(-25), Rating(125)) assert_predictable_draw_probability(Rating(125), Rating(-25)) assert_predictable_draw_probability(Rating(-25), Rating(125), drawn=True) assert_predictable_draw_probability(Rating(25, 10), Rating(25, 0.1)) # functions @various_backends def test_exposure(): env = TrueSkill() assert env.expose(env.create_rating()) == 0 env = TrueSkill(1000, 200) assert env.expose(env.create_rating()) == 0 # mathematics def test_valid_gaussian(): from trueskill.mathematics import Gaussian with raises(TypeError): # sigma argument is needed Gaussian(0) with raises(ValueError): # sigma**2 should be greater than 0 Gaussian(0, 0) def test_valid_matrix(): from trueskill.mathematics import Matrix with raises(TypeError): # src must be a list or dict or callable Matrix(None) with raises(ValueError): # src must be a rectangular array of numbers Matrix([]) with raises(ValueError): # src must be a rectangular array of numbers Matrix([[1, 2, 3], [4, 5]]) with raises(TypeError): # A callable src must return an interable which generates a tuple # containing coordinate and value Matrix(lambda: None) def test_matrix_from_dict(): from trueskill.mathematics import Matrix mat = Matrix({(0, 0): 1, (4, 9): 1}) assert mat.height == 5 assert mat.width == 10 assert mat[0][0] == 1 assert mat[0][1] == 0 assert mat[4][9] == 1 assert mat[4][8] == 0 def test_matrix_from_item_generator(): from trueskill.mathematics import Matrix def gen_matrix(height, width): yield (0, 0), 1 yield (height - 1, width - 1), 1 mat = Matrix(gen_matrix, 5, 10) assert mat.height == 5 assert mat.width == 10 assert mat[0][0] == 1 assert mat[0][1] == 0 assert mat[4][9] == 1 assert mat[4][8] == 0 with raises(TypeError): # A callable src must call set_height and set_width if the size is # non-deterministic Matrix(gen_matrix) def 
gen_and_set_size_matrix(set_height, set_width): set_height(5) set_width(10) return [((0, 0), 1), ((4, 9), 1)] mat = Matrix(gen_and_set_size_matrix) assert mat.height == 5 assert mat.width == 10 assert mat[0][0] == 1 assert mat[0][1] == 0 assert mat[4][9] == 1 assert mat[4][8] == 0 def test_matrix_operations(): from trueskill.mathematics import Matrix assert Matrix([[1, 2], [3, 4]]).inverse() == \ Matrix([[-2.0, 1.0], [1.5, -0.5]]) assert Matrix([[1, 2], [3, 4]]).determinant() == -2 assert Matrix([[1, 2], [3, 4]]).adjugate() == Matrix([[4, -2], [-3, 1]]) with raises(ValueError): # Bad size assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6]]) assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6, 7], [8, 9, 10]]) == \ Matrix([[21, 24, 27], [47, 54, 61]]) with raises(ValueError): # Must be same size Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6, 7], [8, 9, 10]]) assert Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6], [7, 8]]) == \ Matrix([[6, 8], [10, 12]]) # reported bugs @various_backends def test_issue3(): """The `issue #3`_, opened by @youknowone. These inputs led to ZeroDivisionError before 0.1.4. Also another TrueSkill implementations cannot calculate this case. .. 
_issue #3: https://github.com/sublee/trueskill/issues/3 """ # @konikos's case 1 t1 = (Rating(42.234, 3.728), Rating(43.290, 3.842)) t2 = (Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500)) rate([t1, t2], [6, 5]) # @konikos's case 2 t1 = (Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500)) t2 = (Rating(42.234, 3.728), Rating(43.291, 3.842)) rate([t1, t2], [0, 28]) @various_backends(['scipy']) def test_issue4(): """The `issue #4`_, opened by @sublee. numpy.float64 handles floating-point error by different way. For example, it can just warn RuntimeWarning on n/0 problem instead of throwing ZeroDivisionError. .. _issue #4: https://github.com/sublee/trueskill/issues/4 """ import numpy r1, r2 = Rating(105.247, 0.439), Rating(27.030, 0.901) # make numpy to raise FloatingPointError instead of warning # RuntimeWarning old_settings = numpy.seterr(divide='raise') try: rate([(r1,), (r2,)]) finally: numpy.seterr(**old_settings) @various_backends([None, 'scipy']) def test_issue5(backend): """The `issue #5`_, opened by @warner121. This error occurs when a winner has too low rating than a loser. Basically Python cannot calculate correct result but mpmath_ can. I added ``backend`` option to :class:`TrueSkill` class. If it is set to 'mpmath' then the problem will have gone. The result of TrueSkill calculator by Microsoft is N(-273.092, 2.683) and N(-75.830, 2.080), of C# Skills by Moserware is N(NaN, 2.6826) and N(NaN, 2.0798). 
I choose Microsoft's result as an expectation for the test suite. .. _issue #5: https://github.com/sublee/trueskill/issues/5 .. _mpmath: http://mpmath.googlecode.com/ """ assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0 with raises(FloatingPointError): rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) assert _quality_1vs1(Rating(), Rating(1000)) == 0 with raises(FloatingPointError): rate_1vs1(Rating(), Rating(1000)) @various_backends(['mpmath']) def test_issue5_with_mpmath(): _rate_1vs1 = almost.wrap(rate_1vs1, 0) assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0 assert _rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == \ [(-273.361, 2.683), (-75.683, 2.080)] assert _quality_1vs1(Rating(), Rating(1000)) == 0 assert _rate_1vs1(Rating(), Rating(1000)) == \ [(415.298, 6.455), (609.702, 6.455)] @various_backends(['mpmath']) def test_issue5_with_more_extreme(): """If the input is more extreme, 'mpmath' backend also made an exception. But we can avoid the problem with higher precision. """ import mpmath try: dps = mpmath.mp.dps with raises(FloatingPointError): rate_1vs1(Rating(), Rating(1000000)) mpmath.mp.dps = 50 assert almost(rate_1vs1(Rating(), Rating(1000000)), prec=-1) == \ [(400016.896, 6.455), (600008.104, 6.455)] with raises(FloatingPointError): rate_1vs1(Rating(), Rating(1000000000000)) mpmath.mp.dps = 100 assert almost(rate_1vs1(Rating(), Rating(1000000000000)), prec=-7) == \ [(400001600117.693, 6.455), (599998399907.307, 6.455)] finally: mpmath.mp.dps = dps
# --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Haozhi Qi, Guodong Zhang
# --------------------------------------------------------
# Training metrics for the FCIS network.  Each metric accumulates a running
# numerator in ``self.sum_metric`` and a count in ``self.num_inst``; the
# EvalMetric base class reports sum_metric / num_inst.  Throughout this file a
# label value of -1 marks an entry to be ignored.

import mxnet as mx
import numpy as np


def get_rpn_names():
    """Return (prediction names, label names) for the RPN outputs."""
    pred = ['rpn_cls_prob', 'rpn_bbox_loss']
    label = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight']
    return pred, label


def get_rcnn_names(cfg):
    """Return (prediction names, label names) for the RCNN/FCIS outputs.

    In end-to-end training the RCNN label is produced by the network itself
    (appended to ``pred``) and only the RPN labels come from the data loader.
    """
    pred = ['rcnn_cls_prob', 'rcnn_bbox_loss', 'fcis_mask_loss', 'fcis_mask_label']
    label = ['rcnn_label', 'rcnn_bbox_target', 'rcnn_bbox_weight']
    if cfg.TRAIN.END2END:
        pred.append('rcnn_label')
        rpn_pred, rpn_label = get_rpn_names()
        pred = rpn_pred + pred
        label = rpn_label
    return pred, label


class RPNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RPN, ignoring -1 labels."""

    def __init__(self):
        super(RPNAccMetric, self).__init__('RPNAcc')
        self.pred, self.label = get_rpn_names()

    def update(self, labels, preds):
        pred = preds[self.pred.index('rpn_cls_prob')]
        label = labels[self.label.index('rpn_label')]

        # pred (b, c, p) or (b, c, h, w)
        pred_label = mx.ndarray.argmax_channel(pred).asnumpy().astype('int32')
        pred_label = pred_label.reshape((pred_label.shape[0], -1))
        # label (b, p)
        label = label.asnumpy().astype('int32')

        # filter with keep_inds
        keep_inds = np.where(label != -1)
        pred_label = pred_label[keep_inds]
        label = label[keep_inds]

        self.sum_metric += np.sum(pred_label.flat == label.flat)
        self.num_inst += len(pred_label.flat)


class RPNLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy (log) loss of the RPN classifier, ignoring -1 labels."""

    def __init__(self):
        super(RPNLogLossMetric, self).__init__('RPNLogLoss')
        self.pred, self.label = get_rpn_names()

    def update(self, labels, preds):
        pred = preds[self.pred.index('rpn_cls_prob')]
        label = labels[self.label.index('rpn_label')]

        # label (b, p)
        label = label.asnumpy().astype('int32').reshape((-1))
        # pred (b, c, p) or (b, c, h, w) --> (b, p, c) --> (b*p, c)
        pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], -1)).transpose((0, 2, 1))
        pred = pred.reshape((label.shape[0], -1))

        # filter with keep_inds
        keep_inds = np.where(label != -1)[0]
        label = label[keep_inds]
        cls = pred[keep_inds, label]

        # epsilon guards against log(0)
        cls += 1e-14
        cls_loss = -1 * np.log(cls)
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += label.shape[0]


class RPNL1LossMetric(mx.metric.EvalMetric):
    """Smooth-L1 bbox regression loss of the RPN, normalized by the number
    of non-ignored anchors."""

    def __init__(self):
        super(RPNL1LossMetric, self).__init__('RPNL1Loss')
        self.pred, self.label = get_rpn_names()

    def update(self, labels, preds):
        bbox_loss = preds[self.pred.index('rpn_bbox_loss')].asnumpy()

        # calculate num_inst (average on those kept anchors)
        label = labels[self.label.index('rpn_label')].asnumpy()
        num_inst = np.sum(label != -1)

        self.sum_metric += np.sum(bbox_loss)
        self.num_inst += num_inst


class FCISAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RCNN/FCIS head over all kept ROIs."""

    def __init__(self, cfg):
        super(FCISAccMetric, self).__init__('FCISAcc')
        self.e2e = cfg.TRAIN.END2END
        self.pred, self.label = get_rcnn_names(cfg)
        self.cfg = cfg

    def update(self, labels, preds):
        pred = preds[self.pred.index('rcnn_cls_prob')]
        # end-to-end: labels come out of the network's own sampling layer
        if self.e2e:
            label = preds[self.pred.index('rcnn_label')]
        else:
            label = labels[self.label.index('rcnn_label')]

        last_dim = pred.shape[-1]
        pred_label = pred.asnumpy().reshape(-1, last_dim).argmax(axis=1).astype('int32')
        label = label.asnumpy().reshape(-1,).astype('int32')

        # filter with keep_inds
        keep_inds = np.where(label != -1)
        pred_label = pred_label[keep_inds]
        label = label[keep_inds]

        self.sum_metric += np.sum(pred_label.flat == label.flat)
        self.num_inst += len(pred_label.flat)


class FCISAccFGMetric(mx.metric.EvalMetric):
    """Classification accuracy restricted to foreground ROIs (label > 0)."""

    def __init__(self, cfg):
        super(FCISAccFGMetric, self).__init__('FCISAccFG')
        self.e2e = cfg.TRAIN.END2END
        self.pred, self.label = get_rcnn_names(cfg)
        self.cfg = cfg

    def update(self, labels, preds):
        pred = preds[self.pred.index('rcnn_cls_prob')]
        if self.e2e:
            label = preds[self.pred.index('rcnn_label')]
        else:
            label = labels[self.label.index('rcnn_label')]

        last_dim = pred.shape[-1]
        pred_label = pred.asnumpy().reshape(-1, last_dim).argmax(axis=1).astype('int32')
        label = label.asnumpy().reshape(-1,).astype('int32')

        # filter with keep_inds: only foreground classes count here
        keep_inds = np.where(label > 0)
        pred_label = pred_label[keep_inds]
        label = label[keep_inds]

        self.sum_metric += np.sum(pred_label.flat == label.flat)
        self.num_inst += len(pred_label.flat)


class FCISLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy loss of the RCNN/FCIS classifier, ignoring -1 labels."""

    def __init__(self, cfg):
        super(FCISLogLossMetric, self).__init__('FCISLogLoss')
        self.e2e = cfg.TRAIN.END2END
        self.pred, self.label = get_rcnn_names(cfg)
        self.cfg = cfg

    def update(self, labels, preds):
        pred = preds[self.pred.index('rcnn_cls_prob')]
        if self.e2e:
            label = preds[self.pred.index('rcnn_label')]
        else:
            label = labels[self.label.index('rcnn_label')]

        last_dim = pred.shape[-1]
        pred = pred.asnumpy().reshape(-1, last_dim)
        label = label.asnumpy().reshape(-1,).astype('int32')

        # filter with keep_inds
        keep_inds = np.where(label != -1)[0]
        label = label[keep_inds]
        cls = pred[keep_inds, label]

        # epsilon guards against log(0)
        cls += 1e-14
        cls_loss = -1 * np.log(cls)
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += label.shape[0]


class FCISL1LossMetric(mx.metric.EvalMetric):
    """Smooth-L1 bbox regression loss of the RCNN head, normalized by the
    number of non-ignored ROIs."""

    def __init__(self, cfg):
        super(FCISL1LossMetric, self).__init__('FCISL1Loss')
        self.e2e = cfg.TRAIN.END2END
        self.pred, self.label = get_rcnn_names(cfg)
        self.cfg = cfg

    def update(self, labels, preds):
        bbox_loss = preds[self.pred.index('rcnn_bbox_loss')].asnumpy()
        if self.e2e:
            label = preds[self.pred.index('rcnn_label')].asnumpy()
        else:
            label = labels[self.label.index('rcnn_label')].asnumpy()

        # calculate num_inst
        num_inst = np.sum(label != -1)

        self.sum_metric += np.sum(bbox_loss)
        self.num_inst += num_inst


class FCISMaskLossMetric(mx.metric.EvalMetric):
    """Per-pixel binary (fg/bg) log loss of the FCIS mask branch, averaged
    over kept ROIs.  Only implemented for end-to-end training."""

    def __init__(self, cfg):
        super(FCISMaskLossMetric, self).__init__('FCISMaskLoss')
        self.e2e = cfg.TRAIN.END2END
        self.pred, self.label = get_rcnn_names(cfg)
        self.cfg = cfg

    def update(self, labels, preds):
        mask_loss = preds[self.pred.index('fcis_mask_loss')]
        if self.e2e:
            label = preds[self.pred.index('fcis_mask_label')]
        else:
            # mask labels are only produced by the network in e2e mode
            raise NotImplementedError

        mask_size = mask_loss.shape[2]
        label = label.asnumpy().astype('int32').reshape((-1))
        # (b, 2, h, w) -> (b, h*w, 2) -> (b*h*w, 2): one fg/bg pair per pixel
        mask_loss = mx.nd.transpose(mask_loss.reshape((mask_loss.shape[0], mask_loss.shape[1], mask_size * mask_size)), axes=(0, 2, 1))
        mask_loss = mask_loss.reshape((label.shape[0], 2))
        mask_loss = mask_loss.asnumpy()
        keep_inds = np.where(label != -1)[0]
        label = label[keep_inds]

        cls = mask_loss[keep_inds, label]
        # epsilon guards against log(0)
        cls += 1e-14
        cls_loss = -1 * np.log(cls)
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += len(keep_inds)
# -*- coding: utf-8 -*-

# NOTE(review): `ccxt.async` shadows the `async` keyword reserved since
# Python 3.7; newer ccxt renamed the package to `ccxt.async_support` —
# confirm the targeted ccxt/Python versions.
from ccxt.async.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError


class nova (Exchange):
    """ccxt adapter for the Novaexchange REST API (v2)."""

    def describe(self):
        """Return the static exchange description (endpoints, capabilities)."""
        return self.deep_extend(super(nova, self).describe(), {
            'id': 'nova',
            'name': 'Novaexchange',
            'countries': 'TZ',  # Tanzania
            'rateLimit': 2000,
            'version': 'v2',
            'hasCORS': False,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/30518571-78ca0bca-9b8a-11e7-8840-64b83a4a94b2.jpg',
                'api': 'https://novaexchange.com/remote',
                'www': 'https://novaexchange.com',
                'doc': 'https://novaexchange.com/remote/faq',
            },
            'api': {
                'public': {
                    'get': [
                        'markets/',
                        'markets/{basecurrency}/',
                        'market/info/{pair}/',
                        'market/orderhistory/{pair}/',
                        'market/openorders/{pair}/buy/',
                        'market/openorders/{pair}/sell/',
                        'market/openorders/{pair}/both/',
                        'market/openorders/{pair}/{ordertype}/',
                    ],
                },
                'private': {
                    'post': [
                        'getbalances/',
                        'getbalance/{currency}/',
                        'getdeposits/',
                        'getwithdrawals/',
                        'getnewdepositaddress/{currency}/',
                        'getdepositaddress/{currency}/',
                        'myopenorders/',
                        'myopenorders_market/{pair}/',
                        'cancelorder/{orderid}/',
                        'withdraw/{currency}/',
                        'trade/{pair}/',
                        'tradehistory/',
                        'getdeposithistory/',
                        'getwithdrawalhistory/',
                        'walletstatus/',
                        'walletstatus/{currency}/',
                    ],
                },
            },
        })

    async def fetch_markets(self):
        """Fetch active markets; disabled markets are skipped.

        The exchange's market name is '<quote>_<base>' (note the order of
        the unpack below), converted to the unified '<base>/<quote>' symbol.
        """
        response = await self.publicGetMarkets()
        markets = response['markets']
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            if not market['disabled']:
                id = market['marketname']
                quote, base = id.split('_')
                symbol = base + '/' + quote
                result.append({
                    'id': id,
                    'symbol': symbol,
                    'base': base,
                    'quote': quote,
                    'info': market,
                })
        return result

    async def fetch_order_book(self, symbol, params={}):
        """Fetch both sides of the order book for *symbol*."""
        await self.load_markets()
        orderbook = await self.publicGetMarketOpenordersPairBoth(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        # response keys differ from the usual bids/asks naming
        return self.parse_order_book(orderbook, None, 'buyorders', 'sellorders', 'price', 'amount')

    async def fetch_ticker(self, symbol, params={}):
        """Fetch the 24h ticker for *symbol* (timestamped locally: the
        endpoint does not return a server time)."""
        await self.load_markets()
        response = await self.publicGetMarketInfoPair(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        ticker = response['markets'][0]
        timestamp = self.milliseconds()
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high24h']),
            'low': float(ticker['low24h']),
            'bid': self.safe_float(ticker, 'bid'),
            'ask': self.safe_float(ticker, 'ask'),
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': float(ticker['last_price']),
            'change': float(ticker['change24h']),
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': float(ticker['volume24h']),
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Convert one raw public trade into the unified trade structure."""
        # exchange reports seconds; unified timestamps are milliseconds
        timestamp = trade['unix_t_datestamp'] * 1000
        return {
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'id': None,
            'order': None,
            'type': None,
            'side': trade['tradetype'].lower(),
            'price': float(trade['price']),
            'amount': float(trade['amount']),
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        response = await self.publicGetMarketOrderhistoryPair(self.extend({
            'pair': market['id'],
        }, params))
        return self.parse_trades(response['items'], market, since, limit)

    async def fetch_balance(self, params={}):
        """Fetch account balances; 'used' combines lockbox and in-trade
        amounts."""
        await self.load_markets()
        response = await self.privatePostGetbalances()
        balances = response['balances']
        result = {'info': response}
        for b in range(0, len(balances)):
            balance = balances[b]
            currency = balance['currency']
            lockbox = float(balance['amount_lockbox'])
            trades = float(balance['amount_trades'])
            account = {
                'free': float(balance['amount']),
                'used': self.sum(lockbox, trades),
                'total': float(balance['amount_total']),
            }
            result[currency] = account
        return self.parse_balance(result)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order.  Market orders are not supported by the API.

        :raises ExchangeError: if *type* is 'market'
        """
        if type == 'market':
            raise ExchangeError(self.id + ' allows limit orders only')
        await self.load_markets()
        amount = str(amount)
        price = str(price)
        market = self.market(symbol)
        order = {
            'tradetype': side.upper(),
            'tradeamount': amount,
            'tradeprice': price,
            'tradebase': 1,
            'pair': market['id'],
        }
        response = await self.privatePostTradePair(self.extend(order, params))
        # the endpoint does not return an order id
        return {
            'info': response,
            'id': None,
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by its exchange-side id."""
        return await self.privatePostCancelorder(self.extend({
            'orderid': id,
        }, params))

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request.

        Private calls append a nonce to the URL, sign the full URL with
        HMAC-SHA512 (base64) using the secret, and send apikey + signature
        in a form-encoded body.
        """
        url = self.urls['api'] + '/' + self.version + '/'
        if api == 'private':
            url += api + '/'
        url += self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            url += '?' + self.urlencode({'nonce': nonce})
            signature = self.hmac(self.encode(url), self.encode(self.secret), hashlib.sha512, 'base64')
            body = self.urlencode(self.extend({
                'apikey': self.apiKey,
                'signature': signature,
            }, query))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the request and raise ExchangeError on a non-success
        'status' field in the response."""
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'status' in response:
            if response['status'] != 'success':
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
__author__ = 'nick'

"""
Classes to gather documents for the corpus (Python 2).

Libraries:
- Textract       http://textract.readthedocs.org/en/latest/index.html
- BeautifulSoup4 http://www.crummy.com/software/BeautifulSoup/
- PyMongo        http://api.mongodb.org/python/current/
"""

import os.path
import time

import cookielib
import urllib2

import textract
from bs4 import BeautifulSoup
from pymongo import MongoClient


class PaperCrawler:
    """Crawls Google Scholar for keywords read from ``keywords.txt``,
    downloads the result PDFs, extracts their text and stores the documents
    in the ``documentdb`` MongoDB database."""

    # Directory for temporarily stored PDFs.  A class attribute so other
    # deployments can override the previously hard-coded path.
    STORAGE_DIR = '/home/nick/workspace/SPR/tmp_documents/'

    # Files smaller than this many bytes are assumed to be broken downloads.
    MIN_PDF_SIZE = 100000

    def __init__(self):
        """Sole constructor.  Also starts the crawling process.

        :return: nothing
        """
        # keywords.txt holds comma separated keywords for the search;
        # 'rw' was an invalid open() mode and the handle was never closed.
        with open('keywords.txt', 'r') as f:
            keywords = f.readline().split(',')
        print(len(keywords))
        if len(keywords) < 2:
            print('no keywords left')
            self.done = True
        else:
            print('keyword found')
            self.done = False  # always define the attribute
            for word in keywords:
                self.db_init()
                res = self.getkeywordresults(word.strip(), 4)
                self.addkeyworddocuments(res)
                self.client.close()
                time.sleep(120)  # arbitrarily chosen wait time to avoid bot lockout

    def getkeywordresults(self, keyword, pages):
        """Collect scholar results for *keyword* over several result pages.

        :param keyword: keyword(s) to search google scholar for
        :param pages: number of result pages that should be processed
        :return: list of articles, each modeled as a list of attributes
                 [title, authors, pdfurl, year, cites]
        """
        results = []
        for i in range(pages):
            # scholar paginates in steps of 10 results; page i starts at
            # rank i*10 (replaces the old `str(i)+str(0)` / `i is not 0`)
            start = str(i * 10)
            results += self.searchkeyword(keyword, start)
            time.sleep(30)  # throttle between pages to avoid bot lockout
        return results

    def searchkeyword(self, keyword, start):
        """Fetch one page of the google scholar result list for *keyword*.

        :param keyword: keyword(s) to search google scholar for
        :param start: result list rank at which to start (pagination offset)
        :return: list of articles for one page, each modeled as
                 [title, authors, pdfurl, year, cites]
        """
        head = 'http://scholar.google.de/scholar?start=' + start + '&q='
        tail = '&hl=de&as_sdt=0,5'
        print(keyword)
        tsplit = keyword.split()
        print(tsplit)
        query = ''
        for word in tsplit:
            query += word + '+'
        url = head + query + tail
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders.append(('Cookie', cj))
        request = urllib2.Request(url)
        request.add_header('User-Agent', 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11')
        response = opener.open(request)
        html = response.read()
        soup = BeautifulSoup(html)
        articles = []
        for element in soup.find_all('div', {'class': 'gs_r'}):
            try:
                ri = element.find('div', {'class': 'gs_ri'})
                title = ri.find('h3', {'class': 'gs_rt'}).find('a').text
                nolinkaut = ri.find('div', {'class': 'gs_a'}).text
                authors = nolinkaut.split('-')[0]
                yearstring = nolinkaut.split('-')[1]
            except (AttributeError, IndexError):
                # result without the expected markup: skip it
                continue
            if ',' in yearstring:
                year = yearstring.split(',')[1]
            else:
                year = yearstring
            try:
                cites = ri.find('div', {'class': 'gs_fl'}).findNext('a').text.split(':')[1]
            except (AttributeError, IndexError):
                cites = '0'  # no "cited by" link means zero citations
            pdfele = element.find('div', {'class': 'gs_ggs gs_fl'})
            if pdfele is None:
                print('no url found')
                continue
            pdfurl = pdfele.find('div').find('a')['href']
            print(title)
            print(authors)
            print(pdfurl)
            print(year)
            print(cites)
            articles.append([title, authors, pdfurl, year, cites])
        return articles

    def addkeyworddocuments(self, results):
        """Download the PDFs of *results*, extract their text and insert the
        documents with all acquired features into the database.

        :param results: document list resulting from getkeywordresults
        :return: nothing
        """
        doclist = []
        for document in results:
            pdfret = self.downloadpdf(document)
            if pdfret is not None:
                doclist.append(pdfret)
        print('doclist length: ' + str(len(doclist)))
        doclist = self.filterbrokenfiles(doclist)
        dicts = []
        for doc in doclist:
            if self.docdb.docs.find({"title": doc[0][0]}).count() == 0:
                print('doc not in db')
                try:
                    print(doc[1])
                    text = self.getpdftext(doc[1])
                    text = ' '.join(text.split())
                except Exception:
                    # getpdftext returns -1 on failure, which makes the
                    # .split() above raise; drop the broken file either way
                    print('error extracting text')
                    os.remove(doc[1])
                    continue
                # [title, authors, pdfurl, year, cites]
                docdict = {'title': doc[0][0], 'author': doc[0][1],
                           'year': doc[0][3], 'cits': doc[0][4], 'text': text}
                dicts.append(docdict)
        if dicts:
            print('inserting documents')
            self.docdb.docs.insert_many(dicts)
        # clean up PDFs whose documents are now stored
        for doc in doclist:
            if os.path.isfile(doc[1]) and self.docdb.docs.find({"title": doc[0][0]}).count() != 0:
                os.remove(doc[1])

    def downloadpdf(self, result):
        """Download the pdf file of a paper.

        :param result: the document attributes in form of a list
                       [title, authors, pdfurl, year, cites]
        :return: tuple of (document attributes list, path to the pdf file)
                 if the file could be downloaded or is already present,
                 else None
        """
        title, authors, url, year, cits = result
        docatts = [title, authors, url, year, cits]
        storagepath = self.STORAGE_DIR + title + '.pdf'
        # if pdf has already been downloaded, don't download it again.
        # BUG FIX: this branch used to return [title, path], so callers
        # reading doc[0][0] got the first *character* of the title; return
        # the same (docatts, path) shape as the download branch.
        if os.path.isfile(storagepath):
            print('File already present')
            return docatts, storagepath
        elif self.docdb.docs.find({"title": title}).count() != 0:
            print('document already in db')
            return None
        if url is None:
            return None
        if url.startswith('http://scholar.google.com/'):
            # scholar sometimes prefixes the real target URL
            url = url.split('http://scholar.google.com/')[1]
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders.append(('Cookie', cj))
        request = urllib2.Request(url)
        print("... Sending HTTP GET to %s" % url)
        try:
            f = opener.open(request)
        except Exception:
            return None
        try:
            data = f.read()
        finally:
            f.close()
            opener.close()
        try:
            with open(storagepath, "wb") as out:
                out.write(data)
        except IOError:
            return None
        return docatts, storagepath

    def filterbrokenfiles(self, doclist):
        """Filter out files smaller than MIN_PDF_SIZE bytes since they're
        likely to be broken; broken files are deleted from disk.

        :param doclist: list of (docatts, path) entries that should be checked
        :return: list of documents that are okay
        """
        # BUG FIX: the old code removed entries from an *alias* of the list
        # it was iterating, skipping elements; build a new list instead.
        kept = []
        for doc in doclist:
            filepath = doc[1]
            filesize = os.stat(filepath).st_size
            if filesize < self.MIN_PDF_SIZE:
                os.remove(filepath)
                print('#Warning: File ' + filepath + ' deleted, since possibly broken')
            else:
                kept.append(doc)
        return kept

    def getpdftext(self, path):
        """Return the text of a pdf by using the textract library.

        :param path: path to pdf file
        :return: text of document, or -1 if extraction failed (legacy
                 sentinel kept for caller compatibility)
        """
        try:
            return textract.process(path)
        except Exception:
            return -1

    def db_init(self):
        """Open a MongoDB connection and bind the document database."""
        self.client = MongoClient()
        self.docdb = self.client.documentdb


class Janitor:
    """Small helper class to remove documents with missing arguments from
    the database."""

    def __init__(self):
        self.db_init()
        self.db_removebrokendocs()

    def db_init(self):
        """Open a MongoDB connection and bind the document database."""
        self.client = MongoClient()
        self.docdb = self.client.documentdb

    def db_removebrokendocs(self):
        """Delete documents whose title is too short to be real."""
        docs = self.docdb.docs.find()
        for item in docs:
            title = item['title']
            if len(title) < 3:
                print('removing ' + title + ' with id ' + str(item['_id']))
                self.docdb.docs.remove({'_id': item['_id']}, 1)
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument

# Auto-generated azure-cli command implementations for Synapse Kusto pools,
# databases, data connections and principal assignments. Each function is a
# thin wrapper that forwards CLI arguments to the corresponding SDK client
# operation; long-running operations are routed through sdk_no_wait so the
# user can opt out of waiting with --no-wait.

from knack.util import CLIError
from azure.cli.core.util import sdk_no_wait


def synapse_kusto_operation_list(client):
    return client.list()


# NOTE(review): this definition is shadowed by the later
# synapse_kusto_pool_list_sku(client, workspace_name, ...) below; only the
# second one is effective at import time. Likely a generator artifact --
# confirm which one the command table actually binds.
def synapse_kusto_pool_list_sku(client):
    return client.list_skus()


# ---- Kusto pool commands ----

def synapse_kusto_pool_list(client, resource_group_name, workspace_name):
    return client.list_by_workspace(resource_group_name=resource_group_name, workspace_name=workspace_name)


def synapse_kusto_pool_show(client, workspace_name, kusto_pool_name, resource_group_name):
    return client.get(workspace_name=workspace_name,
                      kusto_pool_name=kusto_pool_name,
                      resource_group_name=resource_group_name)


def synapse_kusto_pool_delete(client, workspace_name, resource_group_name, kusto_pool_name, no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       workspace_name=workspace_name,
                       resource_group_name=resource_group_name,
                       kusto_pool_name=kusto_pool_name)


def synapse_kusto_pool_list_follower_database(client, workspace_name, kusto_pool_name, resource_group_name):
    return client.list_follower_databases(workspace_name=workspace_name,
                                          kusto_pool_name=kusto_pool_name,
                                          resource_group_name=resource_group_name)


def synapse_kusto_pool_list_language_extension(client, workspace_name, kusto_pool_name, resource_group_name):
    return client.list_language_extensions(workspace_name=workspace_name,
                                           kusto_pool_name=kusto_pool_name,
                                           resource_group_name=resource_group_name)


def synapse_kusto_pool_list_sku(client, workspace_name, kusto_pool_name, resource_group_name):
    return client.list_skus_by_resource(workspace_name=workspace_name,
                                        kusto_pool_name=kusto_pool_name,
                                        resource_group_name=resource_group_name)


def synapse_kusto_pool_start(client, workspace_name, kusto_pool_name, resource_group_name, no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_start,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       resource_group_name=resource_group_name)


def synapse_kusto_pool_stop(client, workspace_name, kusto_pool_name, resource_group_name, no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_stop,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       resource_group_name=resource_group_name)


# ---- Attached database configuration commands ----

def synapse_kusto_attached_database_configuration_list(client, workspace_name, kusto_pool_name, resource_group_name):
    return client.list_by_kusto_pool(workspace_name=workspace_name,
                                     kusto_pool_name=kusto_pool_name,
                                     resource_group_name=resource_group_name)


def synapse_kusto_attached_database_configuration_show(client,
                                                       workspace_name,
                                                       kusto_pool_name,
                                                       attached_database_configuration_name,
                                                       resource_group_name):
    return client.get(workspace_name=workspace_name,
                      kusto_pool_name=kusto_pool_name,
                      attached_database_configuration_name=attached_database_configuration_name,
                      resource_group_name=resource_group_name)


def synapse_kusto_attached_database_configuration_create(client,
                                                         workspace_name,
                                                         kusto_pool_name,
                                                         attached_database_configuration_name,
                                                         resource_group_name,
                                                         location=None,
                                                         database_name=None,
                                                         kusto_pool_resource_id=None,
                                                         default_principals_modification_kind=None,
                                                         table_level_sharing_properties=None,
                                                         no_wait=False):
    """Create an attached database configuration; only explicitly provided
    optional arguments are placed into the request body."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    if database_name is not None:
        parameters['database_name'] = database_name
    if kusto_pool_resource_id is not None:
        parameters['kusto_pool_resource_id'] = kusto_pool_resource_id
    if default_principals_modification_kind is not None:
        parameters['default_principals_modification_kind'] = default_principals_modification_kind
    if table_level_sharing_properties is not None:
        parameters['table_level_sharing_properties'] = table_level_sharing_properties
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       attached_database_configuration_name=attached_database_configuration_name,
                       resource_group_name=resource_group_name,
                       parameters=parameters)


def synapse_kusto_attached_database_configuration_update(instance,
                                                         workspace_name,
                                                         kusto_pool_name,
                                                         attached_database_configuration_name,
                                                         resource_group_name,
                                                         location=None,
                                                         database_name=None,
                                                         kusto_pool_resource_id=None,
                                                         default_principals_modification_kind=None,
                                                         table_level_sharing_properties=None,
                                                         no_wait=False):
    """Generic-update custom function: patch only the provided fields onto the
    existing instance and return it (the CLI framework performs the PUT)."""
    if location is not None:
        instance.location = location
    if database_name is not None:
        instance.database_name = database_name
    if kusto_pool_resource_id is not None:
        instance.kusto_pool_resource_id = kusto_pool_resource_id
    if default_principals_modification_kind is not None:
        instance.default_principals_modification_kind = default_principals_modification_kind
    if table_level_sharing_properties is not None:
        instance.table_level_sharing_properties = table_level_sharing_properties
    return instance


def synapse_kusto_attached_database_configuration_delete(client,
                                                         workspace_name,
                                                         kusto_pool_name,
                                                         attached_database_configuration_name,
                                                         resource_group_name,
                                                         no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       attached_database_configuration_name=attached_database_configuration_name,
                       resource_group_name=resource_group_name)


# ---- Kusto database commands ----

def synapse_kusto_database_list(client, resource_group_name, workspace_name, kusto_pool_name):
    return client.list_by_kusto_pool(resource_group_name=resource_group_name,
                                     workspace_name=workspace_name,
                                     kusto_pool_name=kusto_pool_name)


def synapse_kusto_database_show(client, resource_group_name, workspace_name, kusto_pool_name, database_name):
    return client.get(resource_group_name=resource_group_name,
                      workspace_name=workspace_name,
                      kusto_pool_name=kusto_pool_name,
                      database_name=database_name)


def synapse_kusto_database_create(client,
                                  resource_group_name,
                                  workspace_name,
                                  kusto_pool_name,
                                  database_name,
                                  read_write_database=None,
                                  no_wait=False):
    """Create a Kusto database. Exactly one database-kind argument must be
    supplied (currently only read_write_database is generated)."""
    all_parameters = []
    if read_write_database is not None:
        all_parameters.append(read_write_database)
    if len(all_parameters) > 1:
        raise CLIError('at most one of  read_write_database is needed for parameters!')
    if len(all_parameters) != 1:
        raise CLIError('parameters is required. but none of read_write_database is provided!')
    parameters = all_parameters[0] if len(all_parameters) == 1 else None
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       parameters=parameters)


def synapse_kusto_database_update(client,
                                  resource_group_name,
                                  workspace_name,
                                  kusto_pool_name,
                                  database_name,
                                  read_write_database=None,
                                  no_wait=False):
    """Update a Kusto database; same exactly-one-kind validation as create."""
    all_parameters = []
    if read_write_database is not None:
        all_parameters.append(read_write_database)
    if len(all_parameters) > 1:
        raise CLIError('at most one of  read_write_database is needed for parameters!')
    if len(all_parameters) != 1:
        raise CLIError('parameters is required. but none of read_write_database is provided!')
    parameters = all_parameters[0] if len(all_parameters) == 1 else None
    return sdk_no_wait(no_wait,
                       client.begin_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       parameters=parameters)


def synapse_kusto_database_delete(client,
                                  resource_group_name,
                                  workspace_name,
                                  kusto_pool_name,
                                  database_name,
                                  no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name)


# ---- Data connection commands ----
# The create/update variants below differ only in the discriminator
# parameters['kind'] ('EventGrid' / 'EventHub' / 'IotHub') and the
# kind-specific optional fields they forward.

def synapse_kusto_data_connection_list(client, resource_group_name, workspace_name, kusto_pool_name, database_name):
    return client.list_by_database(resource_group_name=resource_group_name,
                                   workspace_name=workspace_name,
                                   kusto_pool_name=kusto_pool_name,
                                   database_name=database_name)


def synapse_kusto_data_connection_show(client,
                                       resource_group_name,
                                       workspace_name,
                                       kusto_pool_name,
                                       database_name,
                                       data_connection_name):
    return client.get(resource_group_name=resource_group_name,
                      workspace_name=workspace_name,
                      kusto_pool_name=kusto_pool_name,
                      database_name=database_name,
                      data_connection_name=data_connection_name)


def synapse_kusto_data_connection_event_grid_create(client,
                                                    resource_group_name,
                                                    workspace_name,
                                                    kusto_pool_name,
                                                    database_name,
                                                    data_connection_name,
                                                    location=None,
                                                    storage_account_resource_id=None,
                                                    event_hub_resource_id=None,
                                                    consumer_group=None,
                                                    table_name=None,
                                                    mapping_rule_name=None,
                                                    data_format=None,
                                                    ignore_first_record=None,
                                                    blob_storage_event_type=None,
                                                    no_wait=False):
    """Create an Event Grid data connection (kind='EventGrid')."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    parameters['kind'] = 'EventGrid'
    if storage_account_resource_id is not None:
        parameters['storage_account_resource_id'] = storage_account_resource_id
    if event_hub_resource_id is not None:
        parameters['event_hub_resource_id'] = event_hub_resource_id
    if consumer_group is not None:
        parameters['consumer_group'] = consumer_group
    if table_name is not None:
        parameters['table_name'] = table_name
    if mapping_rule_name is not None:
        parameters['mapping_rule_name'] = mapping_rule_name
    if data_format is not None:
        parameters['data_format'] = data_format
    if ignore_first_record is not None:
        parameters['ignore_first_record'] = ignore_first_record
    if blob_storage_event_type is not None:
        parameters['blob_storage_event_type'] = blob_storage_event_type
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name,
                       parameters=parameters)


def synapse_kusto_data_connection_event_hub_create(client,
                                                   resource_group_name,
                                                   workspace_name,
                                                   kusto_pool_name,
                                                   database_name,
                                                   data_connection_name,
                                                   location=None,
                                                   event_hub_resource_id=None,
                                                   consumer_group=None,
                                                   table_name=None,
                                                   mapping_rule_name=None,
                                                   data_format=None,
                                                   event_system_properties=None,
                                                   compression=None,
                                                   managed_identity_resource_id=None,
                                                   no_wait=False):
    """Create an Event Hub data connection (kind='EventHub')."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    parameters['kind'] = 'EventHub'
    if event_hub_resource_id is not None:
        parameters['event_hub_resource_id'] = event_hub_resource_id
    if consumer_group is not None:
        parameters['consumer_group'] = consumer_group
    if table_name is not None:
        parameters['table_name'] = table_name
    if mapping_rule_name is not None:
        parameters['mapping_rule_name'] = mapping_rule_name
    if data_format is not None:
        parameters['data_format'] = data_format
    if event_system_properties is not None:
        parameters['event_system_properties'] = event_system_properties
    if compression is not None:
        parameters['compression'] = compression
    if managed_identity_resource_id is not None:
        parameters['managed_identity_resource_id'] = managed_identity_resource_id
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name,
                       parameters=parameters)


def synapse_kusto_data_connection_iot_hub_create(client,
                                                 resource_group_name,
                                                 workspace_name,
                                                 kusto_pool_name,
                                                 database_name,
                                                 data_connection_name,
                                                 location=None,
                                                 iot_hub_resource_id=None,
                                                 consumer_group=None,
                                                 table_name=None,
                                                 mapping_rule_name=None,
                                                 data_format=None,
                                                 event_system_properties=None,
                                                 shared_access_policy_name=None,
                                                 no_wait=False):
    """Create an IoT Hub data connection (kind='IotHub')."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    parameters['kind'] = 'IotHub'
    if iot_hub_resource_id is not None:
        parameters['iot_hub_resource_id'] = iot_hub_resource_id
    if consumer_group is not None:
        parameters['consumer_group'] = consumer_group
    if table_name is not None:
        parameters['table_name'] = table_name
    if mapping_rule_name is not None:
        parameters['mapping_rule_name'] = mapping_rule_name
    if data_format is not None:
        parameters['data_format'] = data_format
    if event_system_properties is not None:
        parameters['event_system_properties'] = event_system_properties
    if shared_access_policy_name is not None:
        parameters['shared_access_policy_name'] = shared_access_policy_name
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name,
                       parameters=parameters)


def synapse_kusto_data_connection_event_grid_update(client,
                                                    resource_group_name,
                                                    workspace_name,
                                                    kusto_pool_name,
                                                    database_name,
                                                    data_connection_name,
                                                    location=None,
                                                    storage_account_resource_id=None,
                                                    event_hub_resource_id=None,
                                                    consumer_group=None,
                                                    table_name=None,
                                                    mapping_rule_name=None,
                                                    data_format=None,
                                                    ignore_first_record=None,
                                                    blob_storage_event_type=None,
                                                    no_wait=False):
    """Update an Event Grid data connection (kind='EventGrid')."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    parameters['kind'] = 'EventGrid'
    if storage_account_resource_id is not None:
        parameters['storage_account_resource_id'] = storage_account_resource_id
    if event_hub_resource_id is not None:
        parameters['event_hub_resource_id'] = event_hub_resource_id
    if consumer_group is not None:
        parameters['consumer_group'] = consumer_group
    if table_name is not None:
        parameters['table_name'] = table_name
    if mapping_rule_name is not None:
        parameters['mapping_rule_name'] = mapping_rule_name
    if data_format is not None:
        parameters['data_format'] = data_format
    if ignore_first_record is not None:
        parameters['ignore_first_record'] = ignore_first_record
    if blob_storage_event_type is not None:
        parameters['blob_storage_event_type'] = blob_storage_event_type
    return sdk_no_wait(no_wait,
                       client.begin_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name,
                       parameters=parameters)


def synapse_kusto_data_connection_event_hub_update(client,
                                                   resource_group_name,
                                                   workspace_name,
                                                   kusto_pool_name,
                                                   database_name,
                                                   data_connection_name,
                                                   location=None,
                                                   event_hub_resource_id=None,
                                                   consumer_group=None,
                                                   table_name=None,
                                                   mapping_rule_name=None,
                                                   data_format=None,
                                                   event_system_properties=None,
                                                   compression=None,
                                                   managed_identity_resource_id=None,
                                                   no_wait=False):
    """Update an Event Hub data connection (kind='EventHub')."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    parameters['kind'] = 'EventHub'
    if event_hub_resource_id is not None:
        parameters['event_hub_resource_id'] = event_hub_resource_id
    if consumer_group is not None:
        parameters['consumer_group'] = consumer_group
    if table_name is not None:
        parameters['table_name'] = table_name
    if mapping_rule_name is not None:
        parameters['mapping_rule_name'] = mapping_rule_name
    if data_format is not None:
        parameters['data_format'] = data_format
    if event_system_properties is not None:
        parameters['event_system_properties'] = event_system_properties
    if compression is not None:
        parameters['compression'] = compression
    if managed_identity_resource_id is not None:
        parameters['managed_identity_resource_id'] = managed_identity_resource_id
    return sdk_no_wait(no_wait,
                       client.begin_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name,
                       parameters=parameters)


def synapse_kusto_data_connection_iot_hub_update(client,
                                                 resource_group_name,
                                                 workspace_name,
                                                 kusto_pool_name,
                                                 database_name,
                                                 data_connection_name,
                                                 location=None,
                                                 iot_hub_resource_id=None,
                                                 consumer_group=None,
                                                 table_name=None,
                                                 mapping_rule_name=None,
                                                 data_format=None,
                                                 event_system_properties=None,
                                                 shared_access_policy_name=None,
                                                 no_wait=False):
    """Update an IoT Hub data connection (kind='IotHub')."""
    parameters = {}
    if location is not None:
        parameters['location'] = location
    parameters['kind'] = 'IotHub'
    if iot_hub_resource_id is not None:
        parameters['iot_hub_resource_id'] = iot_hub_resource_id
    if consumer_group is not None:
        parameters['consumer_group'] = consumer_group
    if table_name is not None:
        parameters['table_name'] = table_name
    if mapping_rule_name is not None:
        parameters['mapping_rule_name'] = mapping_rule_name
    if data_format is not None:
        parameters['data_format'] = data_format
    if event_system_properties is not None:
        parameters['event_system_properties'] = event_system_properties
    if shared_access_policy_name is not None:
        parameters['shared_access_policy_name'] = shared_access_policy_name
    return sdk_no_wait(no_wait,
                       client.begin_update,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name,
                       parameters=parameters)


def synapse_kusto_data_connection_delete(client,
                                         resource_group_name,
                                         workspace_name,
                                         kusto_pool_name,
                                         database_name,
                                         data_connection_name,
                                         no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       resource_group_name=resource_group_name,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       data_connection_name=data_connection_name)


# ---- Pool-level principal assignment commands ----

def synapse_kusto_pool_principal_assignment_list(client, workspace_name, kusto_pool_name, resource_group_name):
    return client.list(workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       resource_group_name=resource_group_name)


def synapse_kusto_pool_principal_assignment_show(client,
                                                 workspace_name,
                                                 kusto_pool_name,
                                                 principal_assignment_name,
                                                 resource_group_name):
    return client.get(workspace_name=workspace_name,
                      kusto_pool_name=kusto_pool_name,
                      principal_assignment_name=principal_assignment_name,
                      resource_group_name=resource_group_name)


def synapse_kusto_pool_principal_assignment_create(client,
                                                   workspace_name,
                                                   kusto_pool_name,
                                                   principal_assignment_name,
                                                   resource_group_name,
                                                   principal_id=None,
                                                   role=None,
                                                   tenant_id=None,
                                                   principal_type=None,
                                                   no_wait=False):
    """Create a pool principal assignment from the provided optional fields."""
    parameters = {}
    if principal_id is not None:
        parameters['principal_id'] = principal_id
    if role is not None:
        parameters['role'] = role
    if tenant_id is not None:
        parameters['tenant_id'] = tenant_id
    if principal_type is not None:
        parameters['principal_type'] = principal_type
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       principal_assignment_name=principal_assignment_name,
                       resource_group_name=resource_group_name,
                       parameters=parameters)


def synapse_kusto_pool_principal_assignment_update(instance,
                                                   workspace_name,
                                                   kusto_pool_name,
                                                   principal_assignment_name,
                                                   resource_group_name,
                                                   principal_id=None,
                                                   role=None,
                                                   tenant_id=None,
                                                   principal_type=None,
                                                   no_wait=False):
    """Generic-update custom function: patch provided fields onto the instance."""
    if principal_id is not None:
        instance.principal_id = principal_id
    if role is not None:
        instance.role = role
    if tenant_id is not None:
        instance.tenant_id = tenant_id
    if principal_type is not None:
        instance.principal_type = principal_type
    return instance


def synapse_kusto_pool_principal_assignment_delete(client,
                                                   workspace_name,
                                                   kusto_pool_name,
                                                   principal_assignment_name,
                                                   resource_group_name,
                                                   no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       principal_assignment_name=principal_assignment_name,
                       resource_group_name=resource_group_name)


# ---- Database-level principal assignment commands ----

def synapse_kusto_database_principal_assignment_list(client,
                                                     workspace_name,
                                                     kusto_pool_name,
                                                     database_name,
                                                     resource_group_name):
    return client.list(workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       resource_group_name=resource_group_name)


def synapse_kusto_database_principal_assignment_show(client,
                                                     workspace_name,
                                                     kusto_pool_name,
                                                     database_name,
                                                     principal_assignment_name,
                                                     resource_group_name):
    return client.get(workspace_name=workspace_name,
                      kusto_pool_name=kusto_pool_name,
                      database_name=database_name,
                      principal_assignment_name=principal_assignment_name,
                      resource_group_name=resource_group_name)


def synapse_kusto_database_principal_assignment_create(client,
                                                       workspace_name,
                                                       kusto_pool_name,
                                                       database_name,
                                                       principal_assignment_name,
                                                       resource_group_name,
                                                       principal_id=None,
                                                       role=None,
                                                       tenant_id=None,
                                                       principal_type=None,
                                                       no_wait=False):
    """Create a database principal assignment from the provided optional fields."""
    parameters = {}
    if principal_id is not None:
        parameters['principal_id'] = principal_id
    if role is not None:
        parameters['role'] = role
    if tenant_id is not None:
        parameters['tenant_id'] = tenant_id
    if principal_type is not None:
        parameters['principal_type'] = principal_type
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       principal_assignment_name=principal_assignment_name,
                       resource_group_name=resource_group_name,
                       parameters=parameters)


def synapse_kusto_database_principal_assignment_update(instance,
                                                       workspace_name,
                                                       kusto_pool_name,
                                                       database_name,
                                                       principal_assignment_name,
                                                       resource_group_name,
                                                       principal_id=None,
                                                       role=None,
                                                       tenant_id=None,
                                                       principal_type=None,
                                                       no_wait=False):
    """Generic-update custom function: patch provided fields onto the instance."""
    if principal_id is not None:
        instance.principal_id = principal_id
    if role is not None:
        instance.role = role
    if tenant_id is not None:
        instance.tenant_id = tenant_id
    if principal_type is not None:
        instance.principal_type = principal_type
    return instance


def synapse_kusto_database_principal_assignment_delete(client,
                                                       workspace_name,
                                                       kusto_pool_name,
                                                       database_name,
                                                       principal_assignment_name,
                                                       resource_group_name,
                                                       no_wait=False):
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       workspace_name=workspace_name,
                       kusto_pool_name=kusto_pool_name,
                       database_name=database_name,
                       principal_assignment_name=principal_assignment_name,
                       resource_group_name=resource_group_name)
from __future__ import print_function from pylab import * import functools from matplotlib.pyplot import * import _dict_it import _dict_find import _dict_update_slice import _set_it import _set_insert import _set_find import _set_find_local import _set_create import _set_insert_sort import _dict_insert_sort import _set_insert_erase import _dict_insert_erase import _set_insert_rank import _set_insert_min_gap import _set_erase_slice import _set_insert_overlapping_intervals _run_create = False _run_insert_overlapping_intervals = False _run_it = False _run_insert_sort = False _run_insert_rank = False _run_insert_erase = True _run_find = False _run_erase_slice = False _run_update_slice = False _run_find_local = False _run_insert_min_gap = False class _Recorder(object): def __init__(self): self._x_vals = [] self._results = dict([]) def add_results(self, x_val, res): print(res) self._x_vals.append(x_val) for n in list(res.keys()): self._results.setdefault(n, []).append(res[n]) def maxes_res(self): maxes = [(n, max(res)) for n, res in self._results.items()] maxes.sort(key = lambda nm: nm[1]) return maxes, self._results def _single_malt(fn, x_range, num_its, algs, title, f_name): fig = figure() ax = subplot(111) xlabel('# Items') ylabel('Time (sec.)') ticklabel_format(style = 'sci', axis='y', scilimits=(0,0)) r = _Recorder() for x in x_range: print('running', f_name, x) r.add_results(x, fn(algs, x, num_its)) maxes, res = r.maxes_res() for n in [nm[0] for nm in maxes]: ax.plot(x_range, res[n], label = n) box = ax.get_position() ax.set_position([box.x0, box.y0 + box.height * 0.3, box.width, box.height * 0.72]) ax.legend( [n for (n, m) in maxes], loc = 'upper center', bbox_to_anchor = (0.5, -0.1), ncol = 2 if len(maxes) > 5 else 1) text(0.5, 1.08, title, horizontalalignment='center', fontsize = 13, transform = ax.transAxes) # subtitle(title) savefig(f_name) _banyans = [ 'banyan_red_black_tree', 'banyan_splay_tree', 'banyan_sorted_list', 'banyan_red_black_tree_gen', 
'banyan_splay_tree_gen', 'banyan_sorted_list_gen'] if __name__ == '__main__': #num_its = 1 num_its = 30 #base = 1 base = 50 if _run_find_local: for type_, type_str in [(int, 'Int'), (str, 'Str')]: _single_malt( functools.partial(_set_find_local.run_tests, type_ = type_), [base * i for i in range(1, 10)], 500 * num_its, _banyans + ['blist', 'btrees', 'set'], 'All Items Repeated Find Time As A Function Of # Items', type_str + 'SetFindLocalAll.png') _single_malt( functools.partial(_set_find_local.run_tests, type_ = type_), [base * i for i in range(1, 10)], 500 * num_its, _banyans + ['btrees', 'set'], 'All Items Repeated Find Time As A Function Of # Items', type_str + 'SetFindLocalAllNoBList.png') _single_malt( functools.partial(_set_find_local.run_tests, type_ = type_), [base * i for i in range(1, 10)], 500 * num_its, ['banyan_red_black_tree', 'banyan_splay_tree', 'set', 'btrees'], 'All Items Repeated Find Time As A Function Of # Items', type_str + 'SetFindLocalCompetitive.png') if _run_insert_overlapping_intervals: _single_malt( _set_insert_overlapping_intervals.run_tests, [base * i for i in range(1, 10)], 50 * num_its, ['banyan_red_black_tree', 'bx'], 'All Items Insert + Last Interval Overlaps As A Function Of # Items', 'IntSetInsertOverlappingCompetitive.png') _single_malt( _set_insert_overlapping_intervals.run_tests, [base * i for i in range(1, 10)], 50 * num_its, ['banyan_red_black_tree', 'banyan_red_black_tree_float', 'banyan_red_black_tree_gen', 'bx'], 'All Items Insert + Last Interval Overlaps As A Function Of # Items', 'IntSetInsertOverlappingAll.png') if _run_update_slice: _single_malt( _dict_update_slice.run_tests, [base * i for i in range(1, 10)], 50 * num_its, _banyans + ['bintrees', 'btrees', 'blist', 'dict'], 'Update Fixed-Size Slice As A Function Of # Items', 'IntDictUpdateSliceAll.png') _single_malt( _dict_update_slice.run_tests, [base * i for i in range(1, 10)], 50 * num_its, _banyans + ['btrees', 'dict'], 'Update Fixed-Size Slice As A Function Of # 
Items', 'IntDictUpdateSliceAllNoBListBintrees.png') _single_malt( _dict_update_slice.run_tests, [base * i for i in range(1, 10)], 50 * num_its, ['banyan_red_black_tree', 'btrees', 'dict'], 'Update Fixed-Size Slice As A Function Of # Items', 'IntDictUpdateSliceCompetitive.png') if _run_erase_slice: _single_malt( _set_erase_slice.run_tests, [base * i for i in range(1, 10)], 50 * num_its, _banyans + ['bintrees', 'set'], 'Erase Fixed-Size Slice As A Function Of # Items', 'IntSetEraseSliceAll.png') _single_malt( _set_erase_slice.run_tests, [base * i for i in range(1, 10)], 50 * num_its, ['banyan_red_black_tree', 'bintrees', 'set'], 'Erase Fixed-Size Slice As A Function Of # Items', 'IntSetEraseSliceCompetitive.png') if _run_find: for type_, type_str in [(int, 'Int'), (str, 'Str')]: _single_malt( functools.partial(_set_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 200 * num_its, [ 'banyan_red_black_tree', 'banyan_red_black_tree_rank_updator', 'btrees', 'set'], 'All Items Find Time As A Function Of # Items', type_str + 'SetFindCompetitiveWithRankUpdator.png') _single_malt( functools.partial(_dict_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 50 * num_its, _banyans + ['blist', 'btrees', 'dict'], 'All Items Find Time As A Function Of # Items', type_str + 'DictFindAll.png') _single_malt( functools.partial(_dict_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 200 * num_its, _banyans + ['btrees', 'dict'], 'All Items Find Time As A Function Of # Items', type_str + 'DictFindAllNoBList.png') _single_malt( functools.partial(_dict_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 200 * num_its, ['banyan_red_black_tree', 'btrees', 'dict'], 'All Items Find Time As A Function Of # Items', type_str + 'DictFindCompetitive.png') _single_malt( functools.partial(_set_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 20 * num_its, _banyans + ['blist', 'btrees', 'set'], 'All Items Find Time As A 
Function Of # Items', type_str + 'SetFindAll.png') _single_malt( functools.partial(_set_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 200 * num_its, _banyans + ['btrees', 'set'], 'All Items Find Time As A Function Of # Items', type_str + 'SetFindAllNoBList.png') _single_malt( functools.partial(_set_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 200 * num_its, ['banyan_red_black_tree', 'set', 'btrees'], 'All Items Find Time As A Function Of # Items', type_str + 'SetFindCompetitive.png') _single_malt( functools.partial(_set_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 300 * num_its, ['banyan_red_black_tree', 'banyan_sorted_list', 'set', 'btrees'], 'All Items Find Time As A Function Of # Items', type_str + 'SetFindCompetitiveWithSortedList.png') _single_malt( functools.partial(_set_find.run_tests, type_ = type_), [base * i for i in range(1, 10)], 20 * num_its, ['banyan_red_black_tree', 'banyan_splay_tree', 'set', 'btrees'], 'All Items Find Time As A Function Of # Items', type_str + 'SetFindCompetitiveWithSplayTree.png') if _run_it: _single_malt( _dict_it.run_tests, [base * i for i in range(1, 10)], 5000 * num_its, ['banyan_red_black_tree', 'banyan_sorted_list', 'set', 'btrees'], 'Sorted Iteration Time As A Function Of # Items', 'IntSetItCompetitiveWithSortedList.png') _single_malt( _set_it.run_tests, [base * i for i in range(1, 10)], 1000 * num_its, _banyans + ['bintrees', 'blist', 'btrees', 'set'], 'Sorted Iteration Time As A Function Of # Items', 'IntSetItAll.png') _single_malt( _set_it.run_tests, [base * i for i in range(1, 10)], 1000 * num_its, _banyans + ['btrees', 'set'], 'Sorted Iteration Time As A Function Of # Items', 'IntSetItAllNoBListBintrees.png') if _run_insert_sort: for type_, type_str in [(int, 'Int'), (str, 'Str')]: _single_malt( functools.partial(_dict_insert_sort.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, ['banyan_red_black_tree', 'dict', 'btrees'], 'Insert 
+ Sorted Iteration Time As A Function Of # Items', type_str + 'DictInsertSortCompetitive.png') _single_malt( functools.partial(_dict_insert_sort.run_tests, type_ = type_), [base * i for i in range(1, 10)], 15 * num_its, _banyans + ['bintrees', 'blist', 'btrees', 'dict'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'DictInsertSortAll.png') _single_malt( functools.partial(_dict_insert_sort.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, _banyans + ['btrees', 'dict'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'DictInsertSortAllNoBlistBintrees.png') _single_malt( functools.partial(_dict_insert_sort.run_tests, type_ = type_), [30 * base * i for i in range(1, 10)], 3, ['banyan_red_black_tree', 'dict', 'btrees'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'DictInsertSortCompetitiveLarger.png') _single_malt( functools.partial(_set_insert_sort.run_tests, type_ = type_), [base * i for i in range(1, 10)], 15 * num_its, _banyans + ['bintrees', 'blist', 'btrees', 'set'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'SetInsertSortAll.png') _single_malt( functools.partial(_set_insert_sort.run_tests, type_ = type_), [base * i for i in range(1, 10)], 15 * num_its, _banyans + ['btrees', 'set'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'SetInsertSortAllNoBlistBintrees.png') _single_malt( functools.partial(_set_insert_sort.run_tests, type_ = type_), [30 * base * i for i in range(1, 10)], 3, ['banyan_red_black_tree', 'set', 'btrees'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'SetInsertSortCompetitiveLarger.png') _single_malt( functools.partial(_set_insert_sort.run_tests, type_ = type_), [base * i for i in range(1, 10)], 60 * num_its, ['banyan_red_black_tree', 'set', 'btrees'], 'Insert + Sorted Iteration Time As A Function Of # Items', type_str + 'SetInsertSortCompetitive.png') if _run_insert_rank: 
_single_malt( _set_insert_rank.run_tests, [base * i for i in range(1, 10)], 30, _banyans + ['banyan_red_black_tree_rank_updator', 'btrees', 'set'], 'Insert + Rank Time As A Function Of # Items', 'IntSetInsertRankAllNoBListBintreesWithRankUpdator.png') _single_malt( _set_insert_rank.run_tests, [base * i for i in range(1, 10)], 30, _banyans + ['banyan_red_black_tree_rank_updator', 'bintrees', 'blist', 'btrees', 'set'], 'Insert + Rank Time As A Function Of # Items', 'IntSetInsertRankAllWithRankUpdator.png') _single_malt( _set_insert_rank.run_tests, [base * i for i in range(1, 10)], 30, ['banyan_red_black_tree', 'banyan_red_black_tree_rank_updator', 'set', 'btrees'], 'Insert + Rank Time As A Function Of # Items', 'IntSetInsertRankCompetitiveWithRankUpdator.png') if _run_insert_min_gap: _single_malt( _set_insert_min_gap.run_tests, [base * i for i in range(1, 10)], 20, _banyans + ['banyan_red_black_tree_min_gap_updator', 'btrees', 'set'], 'Insert + Min-Gap Time As A Function Of # Items', 'IntSetInsertMinGapAllNoBListBintreesWithMinGapUpdator.png') _single_malt( _set_insert_min_gap.run_tests, [base * i for i in range(1, 10)], 20, _banyans + ['banyan_red_black_tree_min_gap_updator', 'bintrees', 'blist', 'btrees', 'set'], 'Insert + Min-Gap Time As A Function Of # Items', 'IntSetInsertMinGapAllWithMinGapUpdator.png') _single_malt( _set_insert_min_gap.run_tests, [base * i for i in range(1, 10)], 20, ['banyan_red_black_tree', 'banyan_red_black_tree_min_gap_updator', 'set', 'btrees'], 'Insert + Min-Gap Time As A Function Of # Items', 'IntSetInsertMinGapCompetitiveWithMinGapUpdator.png') if _run_insert_erase: for type_, type_str in [(int, 'Int'), (str, 'Str')]: _single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 30 * num_its, ['banyan_red_black_tree', 'banyan_red_black_tree_rank_updator', 'dict', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DictInsertEraseCompetitiveWithRankUpdator.png') 
_single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [10 * base * i for i in range(1, 10)], 3 * num_its, ['banyan_red_black_tree', 'banyan_red_black_tree_rank_updator', 'dict', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DitInsertEraseCompetitiveWithNodeUpdatorLonger.png') _single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, ['banyan_red_black_tree', 'dict', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DictInsertEraseCompetitive.png') _single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, ['banyan_red_black_tree', 'banyan_sorted_list', 'dict', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DictInsertEraseCompetitiveWithSortedList.png') _single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, _banyans + ['bintrees', 'blist', 'btrees', 'dict'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DictInsertEraseAll.png') _single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, _banyans + ['bintrees', 'btrees', 'dict'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DictInsertEraseAllNoBList.png') _single_malt( functools.partial(_dict_insert_erase.run_tests, type_ = type_), [30 * base * i for i in range(1, 10)], 3, ['banyan_red_black_tree', 'banyan_sorted_list', 'dict', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'DictInsertEraseCompetitiveLonger.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 30 * num_its, ['banyan_red_black_tree', 'banyan_red_black_tree_rank_updator', 'set', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 
'SetInsertEraseCompetitiveWithRankUpdator.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [10 * base * i for i in range(1, 10)], 3 * num_its, ['banyan_red_black_tree', 'banyan_red_black_tree_rank_updator', 'set', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'SetInsertEraseCompetitiveWithNodeUpdatorLonger.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, ['banyan_red_black_tree', 'set', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'SetInsertEraseCompetitive.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, ['banyan_red_black_tree', 'banyan_sorted_list', 'set', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'SetInsertEraseCompetitiveWithSortedList.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, _banyans + ['bintrees', 'blist', 'btrees', 'set'], 'Insert + Erase Time As A Function Of # Items', type_str + 'SetInsertEraseAll.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [base * i for i in range(1, 10)], 100 * num_its, _banyans + ['bintrees', 'btrees', 'set'], 'Insert + Erase Time As A Function Of # Items', type_str + 'SetInsertEraseAllNoBList.png') _single_malt( functools.partial(_set_insert_erase.run_tests, type_ = type_), [30 * base * i for i in range(1, 10)], 3, ['banyan_red_black_tree', 'banyan_sorted_list', 'set', 'btrees'], 'Insert + Erase Time As A Function Of # Items', type_str + 'SetInsertEraseCompetitiveLonger.png') if _run_create: for type_, type_str in [(int, 'Int'), (str, 'Str')]: _single_malt( functools.partial(_set_create.run_tests, type_ = type_), [base * i for i in range(1, 10)], 30 * num_its, ['banyan_red_black_tree', 'banyan_sorted_list', 'set', 'btrees'], 'Create 
Time As A Function Of # Items', type_str + 'SetCreateCompetitiveWithSortedList.png') _single_malt( functools.partial(_set_create.run_tests, type_ = type_), [base * i for i in range(1, 10)], 30 * num_its, _banyans + ['bintrees', 'blist', 'btrees', 'set'], 'Create Time As A Function Of # Items', type_str + 'SetCreateAll.png') _single_malt( functools.partial(_set_create.run_tests, type_ = type_), [base * i for i in range(1, 10)], 30 * num_its, _banyans + ['btrees', 'set'], 'Create Time As A Function Of # Items', type_str + 'SetCreateAllNoBListBintrees.png')
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################

"""Setup script for the 'autobahn' package (supports both Python 2 and 3)."""

from __future__ import absolute_import

import re
import sys
import platform

from setuptools import setup
from setuptools.command.test import test as test_command

# remember if we already had six _before_ installation
try:
    import six  # noqa
    _HAD_SIX = True
except ImportError:
    _HAD_SIX = False

# interpreter/version feature flags used to pick optional dependencies below
CPY = platform.python_implementation() == 'CPython'
PY3 = sys.version_info >= (3,)
PY33 = (3, 3) <= sys.version_info < (3, 4)

# long package description for PyPI (use a context manager so the file handle
# is closed deterministically instead of leaking until GC; `encoding=` is not
# passed because builtin open() on Python 2 does not accept it)
with open('README.rst') as _f:
    LONGSDESC = _f.read()

# get version string from "autobahn/__init__.py"
# See: http://stackoverflow.com/a/7071358/884770
#
VERSIONFILE = "autobahn/__init__.py"
with open(VERSIONFILE, "rt") as _f:
    verstrline = _f.read()
VSRE = r"^__version__ = u['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
    verstr = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))

# Autobahn core packages
#
packages = [
    'autobahn',
    'autobahn.wamp',
    'autobahn.wamp.test',
    'autobahn.websocket',
    'autobahn.websocket.test',
    'autobahn.asyncio',
    'autobahn.twisted',
    'twisted.plugins'
]

# Twisted dependencies (be careful bumping these minimal versions,
# as we make claims to support older Twisted!)
#
extras_require_twisted = [
    "zope.interface>=3.6",  # Zope Public License
    "Twisted>=12.1"         # MIT license
]

# asyncio dependencies
#
if PY3:
    if PY33:
        # "Tulip"
        extras_require_asyncio = [
            "asyncio>=3.4.3"  # Apache 2.0
        ]
    else:
        # Python 3.4+ has asyncio builtin
        extras_require_asyncio = []
else:
    # backport of asyncio for Python 2
    extras_require_asyncio = [
        "trollius>=2.0",   # Apache 2.0
        "futures>=3.0.3"   # BSD license
    ]

# C-based WebSocket acceleration (only makes sense on CPython)
#
if CPY:
    extras_require_accelerate = [
        "wsaccel>=0.6.2"  # Apache 2.0
    ]
    # ujson is broken on Windows (https://github.com/esnme/ultrajson/issues/184)
    if sys.platform != 'win32':
        extras_require_accelerate.append("ujson>=1.33")  # BSD license
else:
    extras_require_accelerate = []

# non-standard WebSocket compression support (FIXME: consider removing altogether)
# Ubuntu: sudo apt-get install libsnappy-dev
# lz4: do we need that anyway?
extras_require_compress = [
    "python-snappy>=0.5",  # BSD license
    "lz4>=0.7.0"           # BSD license
]

# non-JSON WAMP serialization support (namely MsgPack)
#
extras_require_serialization = [
    "msgpack-python>=0.4.6"  # Apache 2.0 license
]

# everything
#
extras_require_all = extras_require_twisted + extras_require_asyncio + \
    extras_require_accelerate + extras_require_compress + extras_require_serialization

# development dependencies
#
extras_require_dev = [
    "pep8>=1.6.2",         # MIT license
    "pep8-naming>=0.3.3",  # MIT license
    "flake8>=2.4.1",       # MIT license
    "pyflakes>=0.9.2",     # MIT license
    "mock>=1.3.0",         # BSD license
    "pytest>=2.7.2",       # MIT license
    "unittest2>=1.1.0"     # BSD license
]

# for testing by users with "python setup.py test" (not Tox, which we use)
#
test_requirements = [
    "pytest>=2.7.2",  # MIT license
    "mock>=1.3.0"     # BSD license
]


# pytest integration for setuptools. see:
# http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands
# https://github.com/pyca/cryptography/pull/678/files

class PyTest(test_command):
    """setuptools 'test' command that delegates to pytest."""

    def finalize_options(self):
        test_command.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here because in module scope the eggs are not loaded.
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)


# Now install Autobahn ..
#
setup(
    name='autobahn',
    version=verstr,
    description='WebSocket client & server library, WAMP real-time framework',
    long_description=LONGSDESC,
    license='MIT License',
    author='Tavendo GmbH',
    author_email='autobahnws@googlegroups.com',
    url='http://autobahn.ws/python',
    platforms='Any',
    install_requires=[
        'six>=1.9.0',   # MIT license
        'txaio>=1.1.0'  # MIT license
    ],
    extras_require={
        'all': extras_require_all,
        'asyncio': extras_require_asyncio,
        'twisted': extras_require_twisted,
        'accelerate': extras_require_accelerate,
        'compress': extras_require_compress,
        'serialization': extras_require_serialization,
        'dev': extras_require_dev,
    },
    tests_require=test_requirements,
    cmdclass={'test': PyTest},
    packages=packages,
    zip_safe=False,
    # http://pypi.python.org/pypi?%3Aaction=list_classifiers
    #
    classifiers=["License :: OSI Approved :: MIT License",
                 "Development Status :: 5 - Production/Stable",
                 "Environment :: No Input/Output (Daemon)",
                 "Framework :: Twisted",
                 "Intended Audience :: Developers",
                 "Operating System :: OS Independent",
                 "Programming Language :: Python",
                 "Programming Language :: Python :: 2",
                 "Programming Language :: Python :: 2.6",
                 "Programming Language :: Python :: 2.7",
                 "Programming Language :: Python :: 3",
                 "Programming Language :: Python :: 3.3",
                 "Programming Language :: Python :: 3.4",
                 "Programming Language :: Python :: Implementation :: CPython",
                 "Programming Language :: Python :: Implementation :: PyPy",
                 "Programming Language :: Python :: Implementation :: Jython",
                 "Topic :: Internet",
                 "Topic :: Internet :: WWW/HTTP",
                 "Topic :: Communications",
                 "Topic :: System :: Distributed Computing",
                 "Topic :: Software Development :: Libraries",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 "Topic :: Software Development :: Object Brokering"],
    keywords='autobahn autobahn.ws websocket realtime rfc6455 wamp rpc pubsub twisted asyncio'
)

try:
    from twisted.internet import reactor
    print("Twisted found (default reactor is {0})".format(reactor.__class__))
except ImportError:
    # the user doesn't have Twisted, so skip
    pass
else:
    # Make Twisted regenerate the dropin.cache, if possible. This is necessary
    # because in a site-wide install, dropin.cache cannot be rewritten by
    # normal users.
    if _HAD_SIX:
        # only proceed if we had had six already _before_ installing AutobahnPython,
        # since it produces errs/warns otherwise
        try:
            from twisted.plugin import IPlugin, getPlugins
            list(getPlugins(IPlugin))
        except Exception as e:
            print("Failed to update Twisted plugin cache: {0}".format(e))
        else:
            print("Twisted dropin.cache regenerated.")
    else:
        print("Warning: regenerate of Twisted dropin.cache skipped (can't run when six wasn't there before)")
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for Perspective Broker module. TODO: update protocol level tests to use new connection API, leaving only specific tests for old API. """ # issue1195 TODOs: replace pump.pump() with something involving Deferreds. # Clean up warning suppression. import sys, os, time, gc, weakref from cStringIO import StringIO from zope.interface import implements, Interface from twisted.trial import unittest from twisted.spread import pb, util, publish, jelly from twisted.internet import protocol, main, reactor from twisted.internet.error import ConnectionRefusedError from twisted.internet.defer import Deferred, gatherResults, succeed from twisted.protocols.policies import WrappingFactory from twisted.python import failure, log from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials from twisted.cred import portal, checkers, credentials class Dummy(pb.Viewable): def view_doNothing(self, user): if isinstance(user, DummyPerspective): return 'hello world!' else: return 'goodbye, cruel world!' class DummyPerspective(pb.Avatar): """ An L{IPerspective} avatar which will be used in some tests. """ def perspective_getDummyViewPoint(self): return Dummy() class DummyRealm(object): implements(portal.IRealm) def requestAvatar(self, avatarId, mind, *interfaces): for iface in interfaces: if iface is pb.IPerspective: return iface, DummyPerspective(avatarId), lambda: None class IOPump: """ Utility to pump data between clients and servers for protocol testing. Perhaps this is a utility worthy of being in protocol.py? """ def __init__(self, client, server, clientIO, serverIO): self.client = client self.server = server self.clientIO = clientIO self.serverIO = serverIO def flush(self): """ Pump until there is no more input or output or until L{stop} is called. This does not run any timers, so don't use it with any code that calls reactor.callLater. 
""" # failsafe timeout self._stop = False timeout = time.time() + 5 while not self._stop and self.pump(): if time.time() > timeout: return def stop(self): """ Stop a running L{flush} operation, even if data remains to be transferred. """ self._stop = True def pump(self): """ Move data back and forth. Returns whether any data was moved. """ self.clientIO.seek(0) self.serverIO.seek(0) cData = self.clientIO.read() sData = self.serverIO.read() self.clientIO.seek(0) self.serverIO.seek(0) self.clientIO.truncate() self.serverIO.truncate() self.client.transport._checkProducer() self.server.transport._checkProducer() for byte in cData: self.server.dataReceived(byte) for byte in sData: self.client.dataReceived(byte) if cData or sData: return 1 else: return 0 def connectedServerAndClient(realm=None): """ Connect a client and server L{Broker} together with an L{IOPump} @param realm: realm to use, defaulting to a L{DummyRealm} @returns: a 3-tuple (client, server, pump). """ realm = realm or DummyRealm() clientBroker = pb.Broker() checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(guest='guest') factory = pb.PBServerFactory(portal.Portal(realm, [checker])) serverBroker = factory.buildProtocol(('127.0.0.1',)) clientTransport = StringIO() serverTransport = StringIO() clientBroker.makeConnection(protocol.FileWrapper(clientTransport)) serverBroker.makeConnection(protocol.FileWrapper(serverTransport)) pump = IOPump(clientBroker, serverBroker, clientTransport, serverTransport) # Challenge-response authentication: pump.flush() return clientBroker, serverBroker, pump class SimpleRemote(pb.Referenceable): def remote_thunk(self, arg): self.arg = arg return arg + 1 def remote_knuth(self, arg): raise Exception() class NestedRemote(pb.Referenceable): def remote_getSimple(self): return SimpleRemote() class SimpleCopy(pb.Copyable): def __init__(self): self.x = 1 self.y = {"Hello":"World"} self.z = ['test'] class SimpleLocalCopy(pb.RemoteCopy): pass pb.setUnjellyableForClass(SimpleCopy, 
SimpleLocalCopy) class SimpleFactoryCopy(pb.Copyable): """ @cvar allIDs: hold every created instances of this class. @type allIDs: C{dict} """ allIDs = {} def __init__(self, id): self.id = id SimpleFactoryCopy.allIDs[id] = self def createFactoryCopy(state): """ Factory of L{SimpleFactoryCopy}, getting a created instance given the C{id} found in C{state}. """ stateId = state.get("id", None) if stateId is None: raise RuntimeError("factory copy state has no 'id' member %s" % (repr(state),)) if not stateId in SimpleFactoryCopy.allIDs: raise RuntimeError("factory class has no ID: %s" % (SimpleFactoryCopy.allIDs,)) inst = SimpleFactoryCopy.allIDs[stateId] if not inst: raise RuntimeError("factory method found no object with id") return inst pb.setUnjellyableFactoryForClass(SimpleFactoryCopy, createFactoryCopy) class NestedCopy(pb.Referenceable): def remote_getCopy(self): return SimpleCopy() def remote_getFactory(self, value): return SimpleFactoryCopy(value) class SimpleCache(pb.Cacheable): def __init___(self): self.x = 1 self.y = {"Hello":"World"} self.z = ['test'] class NestedComplicatedCache(pb.Referenceable): def __init__(self): self.c = VeryVeryComplicatedCacheable() def remote_getCache(self): return self.c class VeryVeryComplicatedCacheable(pb.Cacheable): def __init__(self): self.x = 1 self.y = 2 self.foo = 3 def setFoo4(self): self.foo = 4 self.observer.callRemote('foo',4) def getStateToCacheAndObserveFor(self, perspective, observer): self.observer = observer return {"x": self.x, "y": self.y, "foo": self.foo} def stoppedObserving(self, perspective, observer): log.msg("stopped observing") observer.callRemote("end") if observer == self.observer: self.observer = None class RatherBaroqueCache(pb.RemoteCache): def observe_foo(self, newFoo): self.foo = newFoo def observe_end(self): log.msg("the end of things") pb.setUnjellyableForClass(VeryVeryComplicatedCacheable, RatherBaroqueCache) class SimpleLocalCache(pb.RemoteCache): def setCopyableState(self, state): 
self.__dict__.update(state) def checkMethod(self): return self.check def checkSelf(self): return self def check(self): return 1 pb.setUnjellyableForClass(SimpleCache, SimpleLocalCache) class NestedCache(pb.Referenceable): def __init__(self): self.x = SimpleCache() def remote_getCache(self): return [self.x,self.x] def remote_putCache(self, cache): return (self.x is cache) class Observable(pb.Referenceable): def __init__(self): self.observers = [] def remote_observe(self, obs): self.observers.append(obs) def remote_unobserve(self, obs): self.observers.remove(obs) def notify(self, obj): for observer in self.observers: observer.callRemote('notify', self, obj) class DeferredRemote(pb.Referenceable): def __init__(self): self.run = 0 def runMe(self, arg): self.run = arg return arg + 1 def dontRunMe(self, arg): assert 0, "shouldn't have been run!" def remote_doItLater(self): """ Return a L{Deferred} to be fired on client side. When fired, C{self.runMe} is called. """ d = Deferred() d.addCallbacks(self.runMe, self.dontRunMe) self.d = d return d class Observer(pb.Referenceable): notified = 0 obj = None def remote_notify(self, other, obj): self.obj = obj self.notified = self.notified + 1 other.callRemote('unobserve',self) class NewStyleCopy(pb.Copyable, pb.RemoteCopy, object): def __init__(self, s): self.s = s pb.setUnjellyableForClass(NewStyleCopy, NewStyleCopy) class NewStyleCopy2(pb.Copyable, pb.RemoteCopy, object): allocated = 0 initialized = 0 value = 1 def __new__(self): NewStyleCopy2.allocated += 1 inst = object.__new__(self) inst.value = 2 return inst def __init__(self): NewStyleCopy2.initialized += 1 pb.setUnjellyableForClass(NewStyleCopy2, NewStyleCopy2) class NewStyleCacheCopy(pb.Cacheable, pb.RemoteCache, object): def getStateToCacheAndObserveFor(self, perspective, observer): return self.__dict__ pb.setUnjellyableForClass(NewStyleCacheCopy, NewStyleCacheCopy) class Echoer(pb.Root): def remote_echo(self, st): return st class CachedReturner(pb.Root): def 
__init__(self, cache): self.cache = cache def remote_giveMeCache(self, st): return self.cache class NewStyleTests(unittest.TestCase): def setUp(self): """ Create a pb server using L{Echoer} protocol and connect a client to it. """ self.serverFactory = pb.PBServerFactory(Echoer()) self.wrapper = WrappingFactory(self.serverFactory) self.server = reactor.listenTCP(0, self.wrapper) clientFactory = pb.PBClientFactory() reactor.connectTCP("localhost", self.server.getHost().port, clientFactory) def gotRoot(ref): self.ref = ref return clientFactory.getRootObject().addCallback(gotRoot) def tearDown(self): """ Close client and server connections, reset values of L{NewStyleCopy2} class variables. """ NewStyleCopy2.allocated = 0 NewStyleCopy2.initialized = 0 NewStyleCopy2.value = 1 self.ref.broker.transport.loseConnection() # Disconnect any server-side connections too. for proto in self.wrapper.protocols: proto.transport.loseConnection() return self.server.stopListening() def test_newStyle(self): """ Create a new style object, send it over the wire, and check the result. """ orig = NewStyleCopy("value") d = self.ref.callRemote("echo", orig) def cb(res): self.failUnless(isinstance(res, NewStyleCopy)) self.assertEqual(res.s, "value") self.failIf(res is orig) # no cheating :) d.addCallback(cb) return d def test_alloc(self): """ Send a new style object and check the number of allocations. 
""" orig = NewStyleCopy2() self.assertEqual(NewStyleCopy2.allocated, 1) self.assertEqual(NewStyleCopy2.initialized, 1) d = self.ref.callRemote("echo", orig) def cb(res): # receiving the response creates a third one on the way back self.failUnless(isinstance(res, NewStyleCopy2)) self.assertEqual(res.value, 2) self.assertEqual(NewStyleCopy2.allocated, 3) self.assertEqual(NewStyleCopy2.initialized, 1) self.failIf(res is orig) # no cheating :) # sending the object creates a second one on the far side d.addCallback(cb) return d class ConnectionNotifyServerFactory(pb.PBServerFactory): """ A server factory which stores the last connection and fires a L{Deferred} on connection made. This factory can handle only one client connection. @ivar protocolInstance: the last protocol instance. @type protocolInstance: C{pb.Broker} @ivar connectionMade: the deferred fired upon connection. @type connectionMade: C{Deferred} """ protocolInstance = None def __init__(self, root): """ Initialize the factory. """ pb.PBServerFactory.__init__(self, root) self.connectionMade = Deferred() def clientConnectionMade(self, protocol): """ Store the protocol and fire the connection deferred. """ self.protocolInstance = protocol d, self.connectionMade = self.connectionMade, None if d is not None: d.callback(None) class NewStyleCachedTests(unittest.TestCase): def setUp(self): """ Create a pb server using L{CachedReturner} protocol and connect a client to it. """ self.orig = NewStyleCacheCopy() self.orig.s = "value" self.server = reactor.listenTCP(0, ConnectionNotifyServerFactory(CachedReturner(self.orig))) clientFactory = pb.PBClientFactory() reactor.connectTCP("localhost", self.server.getHost().port, clientFactory) def gotRoot(ref): self.ref = ref d1 = clientFactory.getRootObject().addCallback(gotRoot) d2 = self.server.factory.connectionMade return gatherResults([d1, d2]) def tearDown(self): """ Close client and server connections. 
""" self.server.factory.protocolInstance.transport.loseConnection() self.ref.broker.transport.loseConnection() return self.server.stopListening() def test_newStyleCache(self): """ A new-style cacheable object can be retrieved and re-retrieved over a single connection. The value of an attribute of the cacheable can be accessed on the receiving side. """ d = self.ref.callRemote("giveMeCache", self.orig) def cb(res, again): self.assertIsInstance(res, NewStyleCacheCopy) self.assertEqual("value", res.s) # no cheating :) self.assertNotIdentical(self.orig, res) if again: # Save a reference so it stays alive for the rest of this test self.res = res # And ask for it again to exercise the special re-jelly logic in # Cacheable. return self.ref.callRemote("giveMeCache", self.orig) d.addCallback(cb, True) d.addCallback(cb, False) return d class BrokerTests(unittest.TestCase): thunkResult = None def tearDown(self): try: # from RemotePublished.getFileName os.unlink('None-None-TESTING.pub') except OSError: pass def thunkErrorBad(self, error): self.fail("This should cause a return value, not %s" % (error,)) def thunkResultGood(self, result): self.thunkResult = result def thunkErrorGood(self, tb): pass def thunkResultBad(self, result): self.fail("This should cause an error, not %s" % (result,)) def test_reference(self): c, s, pump = connectedServerAndClient() class X(pb.Referenceable): def remote_catch(self,arg): self.caught = arg class Y(pb.Referenceable): def remote_throw(self, a, b): a.callRemote('catch', b) s.setNameForLocal("y", Y()) y = c.remoteForName("y") x = X() z = X() y.callRemote('throw', x, z) pump.pump() pump.pump() pump.pump() self.assertIdentical(x.caught, z, "X should have caught Z") # make sure references to remote methods are equals self.assertEqual(y.remoteMethod('throw'), y.remoteMethod('throw')) def test_result(self): c, s, pump = connectedServerAndClient() for x, y in (c, s), (s, c): # test reflexivity foo = SimpleRemote() x.setNameForLocal("foo", foo) bar = 
y.remoteForName("foo") self.expectedThunkResult = 8 bar.callRemote('thunk',self.expectedThunkResult - 1 ).addCallbacks(self.thunkResultGood, self.thunkErrorBad) # Send question. pump.pump() # Send response. pump.pump() # Shouldn't require any more pumping than that... self.assertEqual(self.thunkResult, self.expectedThunkResult, "result wasn't received.") def refcountResult(self, result): self.nestedRemote = result def test_tooManyRefs(self): l = [] e = [] c, s, pump = connectedServerAndClient() foo = NestedRemote() s.setNameForLocal("foo", foo) x = c.remoteForName("foo") for igno in xrange(pb.MAX_BROKER_REFS + 10): if s.transport.closed or c.transport.closed: break x.callRemote("getSimple").addCallbacks(l.append, e.append) pump.pump() expected = (pb.MAX_BROKER_REFS - 1) self.assertTrue(s.transport.closed, "transport was not closed") self.assertEqual(len(l), expected, "expected %s got %s" % (expected, len(l))) def test_copy(self): c, s, pump = connectedServerAndClient() foo = NestedCopy() s.setNameForLocal("foo", foo) x = c.remoteForName("foo") x.callRemote('getCopy' ).addCallbacks(self.thunkResultGood, self.thunkErrorBad) pump.pump() pump.pump() self.assertEqual(self.thunkResult.x, 1) self.assertEqual(self.thunkResult.y['Hello'], 'World') self.assertEqual(self.thunkResult.z[0], 'test') def test_observe(self): c, s, pump = connectedServerAndClient() # this is really testing the comparison between remote objects, to make # sure that you can *UN*observe when you have an observer architecture. 
a = Observable() b = Observer() s.setNameForLocal("a", a) ra = c.remoteForName("a") ra.callRemote('observe',b) pump.pump() a.notify(1) pump.pump() pump.pump() a.notify(10) pump.pump() pump.pump() self.assertNotIdentical(b.obj, None, "didn't notify") self.assertEqual(b.obj, 1, 'notified too much') def test_defer(self): c, s, pump = connectedServerAndClient() d = DeferredRemote() s.setNameForLocal("d", d) e = c.remoteForName("d") pump.pump(); pump.pump() results = [] e.callRemote('doItLater').addCallback(results.append) pump.pump(); pump.pump() self.assertFalse(d.run, "Deferred method run too early.") d.d.callback(5) self.assertEqual(d.run, 5, "Deferred method run too late.") pump.pump(); pump.pump() self.assertEqual(results[0], 6, "Incorrect result.") def test_refcount(self): c, s, pump = connectedServerAndClient() foo = NestedRemote() s.setNameForLocal("foo", foo) bar = c.remoteForName("foo") bar.callRemote('getSimple' ).addCallbacks(self.refcountResult, self.thunkErrorBad) # send question pump.pump() # send response pump.pump() # delving into internal structures here, because GC is sort of # inherently internal. 
rluid = self.nestedRemote.luid self.assertIn(rluid, s.localObjects) del self.nestedRemote # nudge the gc if sys.hexversion >= 0x2000000: gc.collect() # try to nudge the GC even if we can't really pump.pump() pump.pump() pump.pump() self.assertNotIn(rluid, s.localObjects) def test_cache(self): c, s, pump = connectedServerAndClient() obj = NestedCache() obj2 = NestedComplicatedCache() vcc = obj2.c s.setNameForLocal("obj", obj) s.setNameForLocal("xxx", obj2) o2 = c.remoteForName("obj") o3 = c.remoteForName("xxx") coll = [] o2.callRemote("getCache" ).addCallback(coll.append).addErrback(coll.append) o2.callRemote("getCache" ).addCallback(coll.append).addErrback(coll.append) complex = [] o3.callRemote("getCache").addCallback(complex.append) o3.callRemote("getCache").addCallback(complex.append) pump.flush() # `worst things first' self.assertEqual(complex[0].x, 1) self.assertEqual(complex[0].y, 2) self.assertEqual(complex[0].foo, 3) vcc.setFoo4() pump.flush() self.assertEqual(complex[0].foo, 4) self.assertEqual(len(coll), 2) cp = coll[0][0] self.assertIdentical(cp.checkMethod().im_self, cp, "potential refcounting issue") self.assertIdentical(cp.checkSelf(), cp, "other potential refcounting issue") col2 = [] o2.callRemote('putCache',cp).addCallback(col2.append) pump.flush() # The objects were the same (testing lcache identity) self.assertTrue(col2[0]) # test equality of references to methods self.assertEqual(o2.remoteMethod("getCache"), o2.remoteMethod("getCache")) # now, refcounting (similiar to testRefCount) luid = cp.luid baroqueLuid = complex[0].luid self.assertIn(luid, s.remotelyCachedObjects, "remote cache doesn't have it") del coll del cp pump.flush() del complex del col2 # extra nudge... pump.flush() # del vcc.observer # nudge the gc if sys.hexversion >= 0x2000000: gc.collect() # try to nudge the GC even if we can't really pump.flush() # The GC is done with it. 
self.assertNotIn(luid, s.remotelyCachedObjects, "Server still had it after GC") self.assertNotIn(luid, c.locallyCachedObjects, "Client still had it after GC") self.assertNotIn(baroqueLuid, s.remotelyCachedObjects, "Server still had complex after GC") self.assertNotIn(baroqueLuid, c.locallyCachedObjects, "Client still had complex after GC") self.assertIdentical(vcc.observer, None, "observer was not removed") def test_publishable(self): try: os.unlink('None-None-TESTING.pub') # from RemotePublished.getFileName except OSError: pass # Sometimes it's not there. c, s, pump = connectedServerAndClient() foo = GetPublisher() # foo.pub.timestamp = 1.0 s.setNameForLocal("foo", foo) bar = c.remoteForName("foo") accum = [] bar.callRemote('getPub').addCallbacks(accum.append, self.thunkErrorBad) pump.flush() obj = accum.pop() self.assertEqual(obj.activateCalled, 1) self.assertEqual(obj.isActivated, 1) self.assertEqual(obj.yayIGotPublished, 1) # timestamp's dirty, we don't have a cache file self.assertEqual(obj._wasCleanWhenLoaded, 0) c, s, pump = connectedServerAndClient() s.setNameForLocal("foo", foo) bar = c.remoteForName("foo") bar.callRemote('getPub').addCallbacks(accum.append, self.thunkErrorBad) pump.flush() obj = accum.pop() # timestamp's clean, our cache file is up-to-date self.assertEqual(obj._wasCleanWhenLoaded, 1) def gotCopy(self, val): self.thunkResult = val.id def test_factoryCopy(self): c, s, pump = connectedServerAndClient() ID = 99 obj = NestedCopy() s.setNameForLocal("foo", obj) x = c.remoteForName("foo") x.callRemote('getFactory', ID ).addCallbacks(self.gotCopy, self.thunkResultBad) pump.pump() pump.pump() pump.pump() self.assertEqual(self.thunkResult, ID, "ID not correct on factory object %s" % (self.thunkResult,)) bigString = "helloworld" * 50 callbackArgs = None callbackKeyword = None def finishedCallback(*args, **kw): global callbackArgs, callbackKeyword callbackArgs = args callbackKeyword = kw class Pagerizer(pb.Referenceable): def __init__(self, callback, 
*args, **kw): self.callback, self.args, self.kw = callback, args, kw def remote_getPages(self, collector): util.StringPager(collector, bigString, 100, self.callback, *self.args, **self.kw) self.args = self.kw = None class FilePagerizer(pb.Referenceable): pager = None def __init__(self, filename, callback, *args, **kw): self.filename = filename self.callback, self.args, self.kw = callback, args, kw def remote_getPages(self, collector): self.pager = util.FilePager(collector, file(self.filename), self.callback, *self.args, **self.kw) self.args = self.kw = None class PagingTests(unittest.TestCase): """ Test pb objects sending data by pages. """ def setUp(self): """ Create a file used to test L{util.FilePager}. """ self.filename = self.mktemp() fd = file(self.filename, 'w') fd.write(bigString) fd.close() def test_pagingWithCallback(self): """ Test L{util.StringPager}, passing a callback to fire when all pages are sent. """ c, s, pump = connectedServerAndClient() s.setNameForLocal("foo", Pagerizer(finishedCallback, 'hello', value=10)) x = c.remoteForName("foo") l = [] util.getAllPages(x, "getPages").addCallback(l.append) while not l: pump.pump() self.assertEqual(''.join(l[0]), bigString, "Pages received not equal to pages sent!") self.assertEqual(callbackArgs, ('hello',), "Completed callback not invoked") self.assertEqual(callbackKeyword, {'value': 10}, "Completed callback not invoked") def test_pagingWithoutCallback(self): """ Test L{util.StringPager} without a callback. """ c, s, pump = connectedServerAndClient() s.setNameForLocal("foo", Pagerizer(None)) x = c.remoteForName("foo") l = [] util.getAllPages(x, "getPages").addCallback(l.append) while not l: pump.pump() self.assertEqual(''.join(l[0]), bigString, "Pages received not equal to pages sent!") def test_emptyFilePaging(self): """ Test L{util.FilePager}, sending an empty file. 
""" filenameEmpty = self.mktemp() fd = file(filenameEmpty, 'w') fd.close() c, s, pump = connectedServerAndClient() pagerizer = FilePagerizer(filenameEmpty, None) s.setNameForLocal("bar", pagerizer) x = c.remoteForName("bar") l = [] util.getAllPages(x, "getPages").addCallback(l.append) ttl = 10 while not l and ttl > 0: pump.pump() ttl -= 1 if not ttl: self.fail('getAllPages timed out') self.assertEqual(''.join(l[0]), '', "Pages received not equal to pages sent!") def test_filePagingWithCallback(self): """ Test L{util.FilePager}, passing a callback to fire when all pages are sent, and verify that the pager doesn't keep chunks in memory. """ c, s, pump = connectedServerAndClient() pagerizer = FilePagerizer(self.filename, finishedCallback, 'frodo', value = 9) s.setNameForLocal("bar", pagerizer) x = c.remoteForName("bar") l = [] util.getAllPages(x, "getPages").addCallback(l.append) while not l: pump.pump() self.assertEqual(''.join(l[0]), bigString, "Pages received not equal to pages sent!") self.assertEqual(callbackArgs, ('frodo',), "Completed callback not invoked") self.assertEqual(callbackKeyword, {'value': 9}, "Completed callback not invoked") self.assertEqual(pagerizer.pager.chunks, []) def test_filePagingWithoutCallback(self): """ Test L{util.FilePager} without a callback. 
""" c, s, pump = connectedServerAndClient() pagerizer = FilePagerizer(self.filename, None) s.setNameForLocal("bar", pagerizer) x = c.remoteForName("bar") l = [] util.getAllPages(x, "getPages").addCallback(l.append) while not l: pump.pump() self.assertEqual(''.join(l[0]), bigString, "Pages received not equal to pages sent!") self.assertEqual(pagerizer.pager.chunks, []) class DumbPublishable(publish.Publishable): def getStateToPublish(self): return {"yayIGotPublished": 1} class DumbPub(publish.RemotePublished): def activated(self): self.activateCalled = 1 class GetPublisher(pb.Referenceable): def __init__(self): self.pub = DumbPublishable("TESTING") def remote_getPub(self): return self.pub pb.setUnjellyableForClass(DumbPublishable, DumbPub) class DisconnectionTests(unittest.TestCase): """ Test disconnection callbacks. """ def error(self, *args): raise RuntimeError("I shouldn't have been called: %s" % (args,)) def gotDisconnected(self): """ Called on broker disconnect. """ self.gotCallback = 1 def objectDisconnected(self, o): """ Called on RemoteReference disconnect. 
""" self.assertEqual(o, self.remoteObject) self.objectCallback = 1 def test_badSerialization(self): c, s, pump = connectedServerAndClient() pump.pump() s.setNameForLocal("o", BadCopySet()) g = c.remoteForName("o") l = [] g.callRemote("setBadCopy", BadCopyable()).addErrback(l.append) pump.flush() self.assertEqual(len(l), 1) def test_disconnection(self): c, s, pump = connectedServerAndClient() pump.pump() s.setNameForLocal("o", SimpleRemote()) # get a client reference to server object r = c.remoteForName("o") pump.pump() pump.pump() pump.pump() # register and then unregister disconnect callbacks # making sure they get unregistered c.notifyOnDisconnect(self.error) self.assertIn(self.error, c.disconnects) c.dontNotifyOnDisconnect(self.error) self.assertNotIn(self.error, c.disconnects) r.notifyOnDisconnect(self.error) self.assertIn(r._disconnected, c.disconnects) self.assertIn(self.error, r.disconnectCallbacks) r.dontNotifyOnDisconnect(self.error) self.assertNotIn(r._disconnected, c.disconnects) self.assertNotIn(self.error, r.disconnectCallbacks) # register disconnect callbacks c.notifyOnDisconnect(self.gotDisconnected) r.notifyOnDisconnect(self.objectDisconnected) self.remoteObject = r # disconnect c.connectionLost(failure.Failure(main.CONNECTION_DONE)) self.assertTrue(self.gotCallback) self.assertTrue(self.objectCallback) class FreakOut(Exception): pass class BadCopyable(pb.Copyable): def getStateToCopyFor(self, p): raise FreakOut() class BadCopySet(pb.Referenceable): def remote_setBadCopy(self, bc): return None class LocalRemoteTest(util.LocalAsRemote): reportAllTracebacks = 0 def sync_add1(self, x): return x + 1 def async_add(self, x=0, y=1): return x + y def async_fail(self): raise RuntimeError() class MyPerspective(pb.Avatar): """ @ivar loggedIn: set to C{True} when the avatar is logged in. @type loggedIn: C{bool} @ivar loggedOut: set to C{True} when the avatar is logged out. 
    @type loggedOut: C{bool}
    """
    implements(pb.IPerspective)

    # Class-level flags; instances flip them on login/logout so tests can
    # assert the lifecycle was driven.
    loggedIn = loggedOut = False

    def __init__(self, avatarId):
        self.avatarId = avatarId

    def perspective_getAvatarId(self):
        """
        Return the avatar identifier which was used to access this avatar.
        """
        return self.avatarId

    def perspective_getViewPoint(self):
        # Returns a Viewable; PB wraps it so view_ methods receive this
        # perspective as the user argument.
        return MyView()

    def perspective_add(self, a, b):
        """
        Add the given objects and return the result.  This is a method
        unavailable on L{Echoer}, so it can only be invoked by authenticated
        users who received their avatar from L{TestRealm}.
        """
        return a + b

    def logout(self):
        self.loggedOut = True


class TestRealm(object):
    """
    A realm which repeatedly gives out a single instance of L{MyPerspective}
    for non-anonymous logins and which gives out a new instance of L{Echoer}
    for each anonymous login.

    @ivar lastPerspective: The L{MyPerspective} most recently created and
        returned from C{requestAvatar}.

    @ivar perspectiveFactory: A one-argument callable which will be used to
        create avatars to be returned from C{requestAvatar}.
    """
    perspectiveFactory = MyPerspective

    lastPerspective = None

    def requestAvatar(self, avatarId, mind, interface):
        """
        Verify that the mind and interface supplied have the expected values
        (this should really be done somewhere else, like inside a test method)
        and return an avatar appropriate for the given identifier.
        """
        assert interface == pb.IPerspective
        assert mind == "BRAINS!"
        if avatarId is checkers.ANONYMOUS:
            # Anonymous logins get a fresh Echoer and a no-op logout.
            return pb.IPerspective, Echoer(), lambda: None
        else:
            self.lastPerspective = self.perspectiveFactory(avatarId)
            self.lastPerspective.loggedIn = True
            return (
                pb.IPerspective, self.lastPerspective,
                self.lastPerspective.logout)


class MyView(pb.Viewable):
    # view_ methods receive the calling user's perspective as first argument;
    # this lets the client verify it is seen as a MyPerspective.
    def view_check(self, user):
        return isinstance(user, MyPerspective)


class LeakyRealm(TestRealm):
    """
    A realm which hangs onto a reference to the mind object in its logout
    function.
    """
    def __init__(self, mindEater):
        """
        Create a L{LeakyRealm}.
@param mindEater: a callable that will be called with the C{mind} object when it is available """ self._mindEater = mindEater def requestAvatar(self, avatarId, mind, interface): self._mindEater(mind) persp = self.perspectiveFactory(avatarId) return (pb.IPerspective, persp, lambda : (mind, persp.logout())) class NewCredLeakTests(unittest.TestCase): """ Tests to try to trigger memory leaks. """ def test_logoutLeak(self): """ The server does not leak a reference when the client disconnects suddenly, even if the cred logout function forms a reference cycle with the perspective. """ # keep a weak reference to the mind object, which we can verify later # evaluates to None, thereby ensuring the reference leak is fixed. self.mindRef = None def setMindRef(mind): self.mindRef = weakref.ref(mind) clientBroker, serverBroker, pump = connectedServerAndClient( LeakyRealm(setMindRef)) # log in from the client connectionBroken = [] root = clientBroker.remoteForName("root") d = root.callRemote("login", 'guest') def cbResponse((challenge, challenger)): mind = SimpleRemote() return challenger.callRemote("respond", pb.respond(challenge, 'guest'), mind) d.addCallback(cbResponse) def connectionLost(_): pump.stop() # don't try to pump data anymore - it won't work connectionBroken.append(1) serverBroker.connectionLost(failure.Failure(RuntimeError("boom"))) d.addCallback(connectionLost) # flush out the response and connectionLost pump.flush() self.assertEqual(connectionBroken, [1]) # and check for lingering references - requestAvatar sets mindRef # to a weakref to the mind; this object should be gc'd, and thus # the ref should return None gc.collect() self.assertEqual(self.mindRef(), None) class NewCredTests(unittest.TestCase): """ Tests related to the L{twisted.cred} support in PB. """ def setUp(self): """ Create a portal with no checkers and wrap it around a simple test realm. Set up a PB server on a TCP port which serves perspectives using that portal. 
""" self.realm = TestRealm() self.portal = portal.Portal(self.realm) self.factory = ConnectionNotifyServerFactory(self.portal) self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1") self.portno = self.port.getHost().port def tearDown(self): """ Shut down the TCP port created by L{setUp}. """ return self.port.stopListening() def getFactoryAndRootObject(self, clientFactory=pb.PBClientFactory): """ Create a connection to the test server. @param clientFactory: the factory class used to create the connection. @return: a tuple (C{factory}, C{deferred}), where factory is an instance of C{clientFactory} and C{deferred} the L{Deferred} firing with the PB root object. """ factory = clientFactory() rootObjDeferred = factory.getRootObject() connector = reactor.connectTCP('127.0.0.1', self.portno, factory) self.addCleanup(connector.disconnect) return factory, rootObjDeferred def test_getRootObject(self): """ Assert only that L{PBClientFactory.getRootObject}'s Deferred fires with a L{RemoteReference}. """ factory, rootObjDeferred = self.getFactoryAndRootObject() def gotRootObject(rootObj): self.assertIsInstance(rootObj, pb.RemoteReference) disconnectedDeferred = Deferred() rootObj.notifyOnDisconnect(disconnectedDeferred.callback) factory.disconnect() return disconnectedDeferred return rootObjDeferred.addCallback(gotRootObject) def test_deadReferenceError(self): """ Test that when a connection is lost, calling a method on a RemoteReference obtained from it raises DeadReferenceError. 
""" factory, rootObjDeferred = self.getFactoryAndRootObject() def gotRootObject(rootObj): disconnectedDeferred = Deferred() rootObj.notifyOnDisconnect(disconnectedDeferred.callback) def lostConnection(ign): self.assertRaises( pb.DeadReferenceError, rootObj.callRemote, 'method') disconnectedDeferred.addCallback(lostConnection) factory.disconnect() return disconnectedDeferred return rootObjDeferred.addCallback(gotRootObject) def test_clientConnectionLost(self): """ Test that if the L{reconnecting} flag is passed with a True value then a remote call made from a disconnection notification callback gets a result successfully. """ class ReconnectOnce(pb.PBClientFactory): reconnectedAlready = False def clientConnectionLost(self, connector, reason): reconnecting = not self.reconnectedAlready self.reconnectedAlready = True if reconnecting: connector.connect() return pb.PBClientFactory.clientConnectionLost( self, connector, reason, reconnecting) factory, rootObjDeferred = self.getFactoryAndRootObject(ReconnectOnce) def gotRootObject(rootObj): self.assertIsInstance(rootObj, pb.RemoteReference) d = Deferred() rootObj.notifyOnDisconnect(d.callback) factory.disconnect() def disconnected(ign): d = factory.getRootObject() def gotAnotherRootObject(anotherRootObj): self.assertIsInstance(anotherRootObj, pb.RemoteReference) d = Deferred() anotherRootObj.notifyOnDisconnect(d.callback) factory.disconnect() return d return d.addCallback(gotAnotherRootObject) return d.addCallback(disconnected) return rootObjDeferred.addCallback(gotRootObject) def test_immediateClose(self): """ Test that if a Broker loses its connection without receiving any bytes, it doesn't raise any exceptions or log any errors. 
""" serverProto = self.factory.buildProtocol(('127.0.0.1', 12345)) serverProto.makeConnection(protocol.FileWrapper(StringIO())) serverProto.connectionLost(failure.Failure(main.CONNECTION_DONE)) def test_loginConnectionRefused(self): """ L{PBClientFactory.login} returns a L{Deferred} which is errbacked with the L{ConnectionRefusedError} if the underlying connection is refused. """ clientFactory = pb.PBClientFactory() loginDeferred = clientFactory.login( credentials.UsernamePassword("foo", "bar")) clientFactory.clientConnectionFailed( None, failure.Failure( ConnectionRefusedError("Test simulated refused connection"))) return self.assertFailure(loginDeferred, ConnectionRefusedError) def _disconnect(self, ignore, factory): """ Helper method disconnecting the given client factory and returning a C{Deferred} that will fire when the server connection has noticed the disconnection. """ disconnectedDeferred = Deferred() self.factory.protocolInstance.notifyOnDisconnect( lambda: disconnectedDeferred.callback(None)) factory.disconnect() return disconnectedDeferred def test_loginLogout(self): """ Test that login can be performed with IUsernamePassword credentials and that when the connection is dropped the avatar is logged out. """ self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass')) factory = pb.PBClientFactory() creds = credentials.UsernamePassword("user", "pass") # NOTE: real code probably won't need anything where we have the # "BRAINS!" argument, passing None is fine. We just do it here to # test that it is being passed. It is used to give additional info to # the realm to aid perspective creation, if you don't need that, # ignore it. mind = "BRAINS!" 
d = factory.login(creds, mind) def cbLogin(perspective): self.assertTrue(self.realm.lastPerspective.loggedIn) self.assertIsInstance(perspective, pb.RemoteReference) return self._disconnect(None, factory) d.addCallback(cbLogin) def cbLogout(ignored): self.assertTrue(self.realm.lastPerspective.loggedOut) d.addCallback(cbLogout) connector = reactor.connectTCP("127.0.0.1", self.portno, factory) self.addCleanup(connector.disconnect) return d def test_logoutAfterDecref(self): """ If a L{RemoteReference} to an L{IPerspective} avatar is decrefed and there remain no other references to the avatar on the server, the avatar is garbage collected and the logout method called. """ loggedOut = Deferred() class EventPerspective(pb.Avatar): """ An avatar which fires a Deferred when it is logged out. """ def __init__(self, avatarId): pass def logout(self): loggedOut.callback(None) self.realm.perspectiveFactory = EventPerspective self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(foo='bar')) factory = pb.PBClientFactory() d = factory.login( credentials.UsernamePassword('foo', 'bar'), "BRAINS!") def cbLoggedIn(avatar): # Just wait for the logout to happen, as it should since the # reference to the avatar will shortly no longer exists. return loggedOut d.addCallback(cbLoggedIn) def cbLoggedOut(ignored): # Verify that the server broker's _localCleanup dict isn't growing # without bound. self.assertEqual(self.factory.protocolInstance._localCleanup, {}) d.addCallback(cbLoggedOut) d.addCallback(self._disconnect, factory) connector = reactor.connectTCP("127.0.0.1", self.portno, factory) self.addCleanup(connector.disconnect) return d def test_concurrentLogin(self): """ Two different correct login attempts can be made on the same root object at the same time and produce two different resulting avatars. 
""" self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse( foo='bar', baz='quux')) factory = pb.PBClientFactory() firstLogin = factory.login( credentials.UsernamePassword('foo', 'bar'), "BRAINS!") secondLogin = factory.login( credentials.UsernamePassword('baz', 'quux'), "BRAINS!") d = gatherResults([firstLogin, secondLogin]) def cbLoggedIn((first, second)): return gatherResults([ first.callRemote('getAvatarId'), second.callRemote('getAvatarId')]) d.addCallback(cbLoggedIn) def cbAvatarIds((first, second)): self.assertEqual(first, 'foo') self.assertEqual(second, 'baz') d.addCallback(cbAvatarIds) d.addCallback(self._disconnect, factory) connector = reactor.connectTCP('127.0.0.1', self.portno, factory) self.addCleanup(connector.disconnect) return d def test_badUsernamePasswordLogin(self): """ Test that a login attempt with an invalid user or invalid password fails in the appropriate way. """ self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass')) factory = pb.PBClientFactory() firstLogin = factory.login( credentials.UsernamePassword('nosuchuser', 'pass')) secondLogin = factory.login( credentials.UsernamePassword('user', 'wrongpass')) self.assertFailure(firstLogin, UnauthorizedLogin) self.assertFailure(secondLogin, UnauthorizedLogin) d = gatherResults([firstLogin, secondLogin]) def cleanup(ignore): errors = self.flushLoggedErrors(UnauthorizedLogin) self.assertEqual(len(errors), 2) return self._disconnect(None, factory) d.addCallback(cleanup) connector = reactor.connectTCP("127.0.0.1", self.portno, factory) self.addCleanup(connector.disconnect) return d def test_anonymousLogin(self): """ Verify that a PB server using a portal configured with an checker which allows IAnonymous credentials can be logged into using IAnonymous credentials. 
""" self.portal.registerChecker(checkers.AllowAnonymousAccess()) factory = pb.PBClientFactory() d = factory.login(credentials.Anonymous(), "BRAINS!") def cbLoggedIn(perspective): return perspective.callRemote('echo', 123) d.addCallback(cbLoggedIn) d.addCallback(self.assertEqual, 123) d.addCallback(self._disconnect, factory) connector = reactor.connectTCP("127.0.0.1", self.portno, factory) self.addCleanup(connector.disconnect) return d def test_anonymousLoginNotPermitted(self): """ Verify that without an anonymous checker set up, anonymous login is rejected. """ self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass')) factory = pb.PBClientFactory() d = factory.login(credentials.Anonymous(), "BRAINS!") self.assertFailure(d, UnhandledCredentials) def cleanup(ignore): errors = self.flushLoggedErrors(UnhandledCredentials) self.assertEqual(len(errors), 1) return self._disconnect(None, factory) d.addCallback(cleanup) connector = reactor.connectTCP('127.0.0.1', self.portno, factory) self.addCleanup(connector.disconnect) return d def test_anonymousLoginWithMultipleCheckers(self): """ Like L{test_anonymousLogin} but against a portal with a checker for both IAnonymous and IUsernamePassword. """ self.portal.registerChecker(checkers.AllowAnonymousAccess()) self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass')) factory = pb.PBClientFactory() d = factory.login(credentials.Anonymous(), "BRAINS!") def cbLogin(perspective): return perspective.callRemote('echo', 123) d.addCallback(cbLogin) d.addCallback(self.assertEqual, 123) d.addCallback(self._disconnect, factory) connector = reactor.connectTCP('127.0.0.1', self.portno, factory) self.addCleanup(connector.disconnect) return d def test_authenticatedLoginWithMultipleCheckers(self): """ Like L{test_anonymousLoginWithMultipleCheckers} but check that username/password authentication works. 
""" self.portal.registerChecker(checkers.AllowAnonymousAccess()) self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass')) factory = pb.PBClientFactory() d = factory.login( credentials.UsernamePassword('user', 'pass'), "BRAINS!") def cbLogin(perspective): return perspective.callRemote('add', 100, 23) d.addCallback(cbLogin) d.addCallback(self.assertEqual, 123) d.addCallback(self._disconnect, factory) connector = reactor.connectTCP('127.0.0.1', self.portno, factory) self.addCleanup(connector.disconnect) return d def test_view(self): """ Verify that a viewpoint can be retrieved after authenticating with cred. """ self.portal.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass')) factory = pb.PBClientFactory() d = factory.login( credentials.UsernamePassword("user", "pass"), "BRAINS!") def cbLogin(perspective): return perspective.callRemote("getViewPoint") d.addCallback(cbLogin) def cbView(viewpoint): return viewpoint.callRemote("check") d.addCallback(cbView) d.addCallback(self.assertTrue) d.addCallback(self._disconnect, factory) connector = reactor.connectTCP("127.0.0.1", self.portno, factory) self.addCleanup(connector.disconnect) return d class NonSubclassingPerspective: implements(pb.IPerspective) def __init__(self, avatarId): pass # IPerspective implementation def perspectiveMessageReceived(self, broker, message, args, kwargs): args = broker.unserialize(args, self) kwargs = broker.unserialize(kwargs, self) return broker.serialize((message, args, kwargs)) # Methods required by TestRealm def logout(self): self.loggedOut = True class NSPTests(unittest.TestCase): """ Tests for authentication against a realm where the L{IPerspective} implementation is not a subclass of L{Avatar}. 
""" def setUp(self): self.realm = TestRealm() self.realm.perspectiveFactory = NonSubclassingPerspective self.portal = portal.Portal(self.realm) self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse() self.checker.addUser("user", "pass") self.portal.registerChecker(self.checker) self.factory = WrappingFactory(pb.PBServerFactory(self.portal)) self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1") self.addCleanup(self.port.stopListening) self.portno = self.port.getHost().port def test_NSP(self): """ An L{IPerspective} implementation which does not subclass L{Avatar} can expose remote methods for the client to call. """ factory = pb.PBClientFactory() d = factory.login(credentials.UsernamePassword('user', 'pass'), "BRAINS!") reactor.connectTCP('127.0.0.1', self.portno, factory) d.addCallback(lambda p: p.callRemote('ANYTHING', 'here', bar='baz')) d.addCallback(self.assertEqual, ('ANYTHING', ('here',), {'bar': 'baz'})) def cleanup(ignored): factory.disconnect() for p in self.factory.protocols: p.transport.loseConnection() d.addCallback(cleanup) return d class IForwarded(Interface): """ Interface used for testing L{util.LocalAsyncForwarder}. """ def forwardMe(): """ Simple synchronous method. """ def forwardDeferred(): """ Simple asynchronous method. """ class Forwarded: """ Test implementation of L{IForwarded}. @ivar forwarded: set if C{forwardMe} is called. @type forwarded: C{bool} @ivar unforwarded: set if C{dontForwardMe} is called. @type unforwarded: C{bool} """ implements(IForwarded) forwarded = False unforwarded = False def forwardMe(self): """ Set a local flag to test afterwards. """ self.forwarded = True def dontForwardMe(self): """ Set a local flag to test afterwards. This should not be called as it's not in the interface. """ self.unforwarded = True def forwardDeferred(self): """ Asynchronously return C{True}. """ return succeed(True) class SpreadUtilTests(unittest.TestCase): """ Tests for L{twisted.spread.util}. 
""" def test_sync(self): """ Call a synchronous method of a L{util.LocalAsRemote} object and check the result. """ o = LocalRemoteTest() self.assertEqual(o.callRemote("add1", 2), 3) def test_async(self): """ Call an asynchronous method of a L{util.LocalAsRemote} object and check the result. """ o = LocalRemoteTest() o = LocalRemoteTest() d = o.callRemote("add", 2, y=4) self.assertIsInstance(d, Deferred) d.addCallback(self.assertEqual, 6) return d def test_asyncFail(self): """ Test a asynchronous failure on a remote method call. """ o = LocalRemoteTest() d = o.callRemote("fail") def eb(f): self.assertTrue(isinstance(f, failure.Failure)) f.trap(RuntimeError) d.addCallbacks(lambda res: self.fail("supposed to fail"), eb) return d def test_remoteMethod(self): """ Test the C{remoteMethod} facility of L{util.LocalAsRemote}. """ o = LocalRemoteTest() m = o.remoteMethod("add1") self.assertEqual(m(3), 4) def test_localAsyncForwarder(self): """ Test a call to L{util.LocalAsyncForwarder} using L{Forwarded} local object. """ f = Forwarded() lf = util.LocalAsyncForwarder(f, IForwarded) lf.callRemote("forwardMe") self.assertTrue(f.forwarded) lf.callRemote("dontForwardMe") self.assertFalse(f.unforwarded) rr = lf.callRemote("forwardDeferred") l = [] rr.addCallback(l.append) self.assertEqual(l[0], 1) class PBWithSecurityOptionsTests(unittest.TestCase): """ Test security customization. """ def test_clientDefaultSecurityOptions(self): """ By default, client broker should use C{jelly.globalSecurity} as security settings. """ factory = pb.PBClientFactory() broker = factory.buildProtocol(None) self.assertIdentical(broker.security, jelly.globalSecurity) def test_serverDefaultSecurityOptions(self): """ By default, server broker should use C{jelly.globalSecurity} as security settings. 
""" factory = pb.PBServerFactory(Echoer()) broker = factory.buildProtocol(None) self.assertIdentical(broker.security, jelly.globalSecurity) def test_clientSecurityCustomization(self): """ Check that the security settings are passed from the client factory to the broker object. """ security = jelly.SecurityOptions() factory = pb.PBClientFactory(security=security) broker = factory.buildProtocol(None) self.assertIdentical(broker.security, security) def test_serverSecurityCustomization(self): """ Check that the security settings are passed from the server factory to the broker object. """ security = jelly.SecurityOptions() factory = pb.PBServerFactory(Echoer(), security=security) broker = factory.buildProtocol(None) self.assertIdentical(broker.security, security)
import sys, logging.config, os, time, socket, yaml, ssl
from flask import (
  Flask, request, jsonify, app, make_response, Response, json, Blueprint
)
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from werkzeug.contrib.fixers import ProxyFix
from flask_cors import CORS
import flask_restful as restful
from traceback import format_exc
from jenova.components import db, create_app, Config, CallLogger
from jenova.resources import (
  # Resellers and clients resources
  ResellerListResource, ResellerDomainListResource, ResellerListByQueryResource,
  ResellerResource, ClientResource, ClientListResource, ResellerServicesListResource,
  # User, Auth, Scope and Permissions resources
  AuthenticationResource, PermissionsResource, UserListResource, UserChangeStateResource,
  UserResource, ScopeUserResource, ScopeListResource, ScopeListUserResource, ScopeResource,
  # Domain and Cos resources
  DomainResource, DomainServiceResource, DomainListResource, DomainServicePreAuthDelegationResource,
  CosResource, DomainServiceStateResource, DomainListServiceStateResource, DomainListByQueryResource,
  DomainCosResource,
  # Task resources
  TaskResource,
  # Services resources
  ServiceResource,
  # DNS resources
  DnsSOAResource, DnsRecordsResource, DnsRecordsBackupResource,
  # Notices resources
  NoticesResource,
  # Accounts resources
  ExternalAccountsResource, ExternalAccountsListResource, ExternalDomainStatusResource,
  # Reports resources
  ResellerReportResource, DomainReportResource,
  # Distribution List resources
  DistributionListsResource, DistributionListResource
)

# NOTE(review): hard-coded JWT signing key ('changeme') used to sign the dev
# token below — this is a security risk if it ever reaches production; it
# should come from configuration/environment. Flagged, not changed here.
SKEY = 'changeme'

# Module-level logger; reassigned later to a logging.getLogger(__name__)
# instance once the logging config has been loaded.
logger = CallLogger.logger()

# TODO: Review tasks (create domain)
# TODO: More restful... Set Location header to conflicting problems
# TODO: disable reseller must inactivate all users
# TODO: Refactor http status code
# TODO: Change str parameters to unicode one
# TODO: all args must be strict! (Cannot accept unknown args!)
# TODO: Convert post params to lowercase
# TODO: create password strength
# TODO: Reject request headers != application/json
# TODO: Performance query adjustment: http://stackoverflow.com/questions/28280507/setup-relationship-one-to-one-in-flask-sqlalchemy

def is_dev():
  """Return True when running in development mode (NODE_ENV=development)."""
  return os.environ.get('NODE_ENV') == 'development'

try:
  # Build the Flask app via the application factory and wire up CORS and
  # the flask-restful API object.
  app = create_app()
  CORS(app, expose_headers=['Location'])
  api = restful.Api(app)
  main_config = Config.load()

  # Logging: configure from the loaded config, then rebind the module logger.
  logging.config.dictConfig(main_config['logger'])
  logger = logging.getLogger(__name__)

  # Resellers/Clients resources
  api.add_resource(ResellerListResource, '/resellers')
  api.add_resource(ResellerListByQueryResource, '/resellers/<by_name_query>')
  api.add_resource(ResellerResource, '/resellers/<target_reseller>')
  api.add_resource(DomainListByQueryResource, *[
      '/clients/<client_name>/domains/<by_name_query>',
      '/resellers/<reseller_name>/domains/<by_name_query>',
      '/resellers/domains/<by_name_query>'
    ]
  )
  api.add_resource(ResellerDomainListResource, '/resellers/<target_reseller>/domains')
  api.add_resource(ResellerServicesListResource, '/resellers/<target_reseller>/services')
  api.add_resource(ClientListResource, *[
      '/clients',
      '/clients/<by_name_query>',
      '/resellers/<target_reseller>/clients',
      '/resellers/<target_reseller>/clients/<by_name_query>',
    ]
  )
  api.add_resource(ClientResource, '/resellers/<target_reseller>/clients/<target_client>')

  # User resources
  # Users belong to reseller (may be created only in reseller creation) or clients.
  api.add_resource(UserListResource, *[
      '/users',
      '/users/<by_name_query>'
    ]
  )
  api.add_resource(UserResource, '/users/<target_auth>')
  api.add_resource(UserChangeStateResource, *[
      '/users/<target_auth>/globaladmin',
      '/users/<target_auth>/admin',
      '/users/<target_auth>/api'
    ]
  )

  # Notices Resource
  api.add_resource(NoticesResource, *[
      '/service/<target_service>/notices',
      '/service/<target_service>/notices/<notice_id>'
    ]
  )

  # Scopes/Permissions resources
  # http://api.inova.com.br:8080/scopes/dns/users/speedhost/permissions
  # Scopes are unique. Has users and permissions bound into it
  api.add_resource(ScopeListResource, '/scopes')
  api.add_resource(ScopeResource, '/scopes/<scope_name>')
  api.add_resource(ScopeListUserResource, '/scopes/<scope_name>/users')
  api.add_resource(ScopeUserResource, '/scopes/<scope_name>/users/<user>')
  api.add_resource(PermissionsResource, *[
      '/scopes/<scope_name>/users/<user>/permissions',
      '/scopes/<scope_name>/users/<user>/permissions/read',
      '/scopes/<scope_name>/users/<user>/permissions/write',
      '/scopes/<scope_name>/users/<user>/permissions/edit',
      '/scopes/<scope_name>/users/<user>/permissions/delete'
    ]
  )

  # Reports
  api.add_resource(ResellerReportResource, '/reports/resellers/<target_reseller>')
  api.add_resource(DomainReportResource, '/reports/domains/<target_domain>/services/<target_service>')

  # External Domain Status
  api.add_resource(ExternalDomainStatusResource, '/services/<service_name>/domains/<domain_name>/status')

  # External Accounts Management
  api.add_resource(ExternalAccountsResource, '/services/<service_name>/domains/<domain_name>/accounts/<target_account>')
  api.add_resource(ExternalAccountsListResource, '/services/<service_name>/domains/<domain_name>/accounts')

  # External Accounts -> DistributionListResource
  api.add_resource(DistributionListsResource, '/services/<service_name>/domains/<domain_name>/dlists')
  api.add_resource(DistributionListResource, '/services/<service_name>/domains/<domain_name>/dlists/<dlist_name>')

  # Authentication resource
  api.add_resource(AuthenticationResource, *['/login', '/auth'])

  # Domain resources; config_state is passed as constructor kwargs to the
  # resources that need the loaded configuration.
  config_state = {
    'main_config' : main_config
  }
  api.add_resource(DomainListResource, '/clients/<client_name>/domains')
  api.add_resource(DomainResource, '/clients/<client_name>/domains/<domain_name>')
  api.add_resource(DomainListServiceStateResource, '/clients/<client_name>/domains/<target_domain>/services')
  api.add_resource(DomainServiceResource,
    '/services/<service_name>/domains/<domain_name>',
    resource_class_kwargs = config_state)
  api.add_resource(DomainServiceStateResource,
    '/clients/<client_name>/domains/<target_domain>/services/<service_name>')

  # Task resources
  api.add_resource(TaskResource, '/tasks/<task_type>/id/<task_id>')

  api.add_resource(DomainServicePreAuthDelegationResource,
    '/services/<service_name>/domains/<domain_name>/preauth',
    resource_class_kwargs = config_state
  )

  api.add_resource(DomainCosResource, '/services/<service_name>/domains/<domain_name>/cos')

  domain_endpoints = ['/clients/<client_name>/domains/<domain_name>', '/domains/<target_domain>']

  # TODO: remake endpoints
  api.add_resource(ServiceResource, '/service/<target_service>')
  api.add_resource(CosResource, '/service/<service_name>/cos/<target_cos>')
  api.add_resource(DnsSOAResource, '/service/<service_name>/zone/<domain_name>')
  api.add_resource(DnsRecordsResource, *[
      '/service/<service_name>/zone/<domain_name>/type/<dns_type>/name/<name>',
      '/service/<service_name>/zone/<domain_name>/type/<dns_type>/name/<name>/content/<content>/ttl/<ttl>'
    ]
  )
  api.add_resource(DnsRecordsBackupResource, '/service/<service_name>/zone/<domain_name>/backup')
  # TODO: dns_type get resource.
  # TODO: all domains resource

  if is_dev():
    # Development only: block until the database accepts TCP connections.
    while True:
      s = socket.socket()
      try:
        s.connect((os.environ['JNV_MDB_HOST'], 3306))
        break
      except Exception as e:
        # TODO: logger HERE
        # BUGFIX(review): message said "5 seconds" but the sleep is 6.
        print('Error connecting to database, sleeping for 6 seconds... %s' % e)
        time.sleep(6)

  #db.init_app(app)
  if is_dev():
    # Development only: create the schema and seed a QA admin user, then log
    # a signed token for manual testing.
    with app.app_context():
      db.create_all()
      from jenova.models import User, UserSchema, Scope, Permissions
      from jenova.components import Security
      from datetime import datetime
      import jwt

      if not User.query.filter_by(login=os.environ.get('AUTH_LOGIN')).first():
        user = User(
          login = os.environ.get('AUTH_LOGIN'),
          name = 'QA Admin',
          email = 'sandro.mello@inova.net',
          password = Security.hash_password(os.environ.get('AUTH_PASSWORD')),
          api_enabled = True,
          global_admin = True
        )

        # for scope_name in DEFAULT_SCOPES:
        #   scope = Scope(name = scope_name)
        #   scope.permissions = Permissions(
        #     read = True,
        #     write = True,
        #     delete = True,
        #     edit = True
        #   )
        #   db.session.add(scope)
        # db.session.commit()
        #plain_secretkey, hashed_secretkey = os.environ.get('SECRETKEY'), os.environ.get('HASHED_SECRETKEY')
        #user.api_access = [ApiAccess(api_key=os.environ.get('APIKEY'), secret_key=hashed_secretkey,
        #  comment='QA/DEV ADMIN USER')]
        db.session.add(user)
        db.session.commit()

      user = User.query.filter_by(login = os.environ.get('AUTH_LOGIN')).first()
      token = jwt.encode({'user' : UserSchema().dump(user).data}, SKEY, algorithm='HS256')
      logger.info(token)

  # support for wsgi containers: http://flask.pocoo.org/docs/0.10/deploying/wsgi-standalone/
  app.wsgi_app = ProxyFix(app.wsgi_app)
except KeyError as ex:
  print('Could not find environment variable %s' % ex)
  sys.exit(1)
except Exception as ex:
  print('Error doing the initial config: %s\n%s' % (ex, format_exc()))
  sys.exit(1)

if __name__ == '__main__':
  try:
    if is_dev():
      # Development: plain HTTP with the Werkzeug debugger/reloader.
      app.run(host='0.0.0.0', port=8443, debug=True, threaded=True)
    else:
      # BUGFIX(review): previously the SSL server was started unconditionally
      # after the dev server returned; it now only runs outside dev mode.
      context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
      context.load_cert_chain(os.environ.get('JNV_SSL_CERT'), os.environ.get('JNV_SSL_KEY'))
      app.run(host='0.0.0.0', port=8443, debug=True, ssl_context=context, threaded=True)
  except Exception as e:
    print('Error starting web app: %s' % e)
from numpy import asarray, mean, sqrt, ndarray, amin, amax, concatenate, sum, zeros, maximum, \ argmin, newaxis, ones, delete, NaN, inf, isnan, clip, logical_or, unique, where, all from thunder.utils.serializable import Serializable from thunder.utils.common import checkParams, aslist from thunder.rdds.images import Images from thunder.rdds.series import Series class Source(Serializable, object): """ A single source, represented as a list of coordinates and other optional specifications. A source also has a set of lazily computed attributes useful for representing and comparing its geometry, such as center, bounding box, and bounding polygon. These properties will be computed lazily and made available as attributes when requested. Parameters ---------- coordinates : array-like List of 2D or 3D coordinates, can be a list of lists or array of shape (n,2) or (n,3) values : list or array-like Value (or weight) associated with each coordiante id : int or string Arbitrary specification per source, typically an index or string label Attributes ---------- center : list or array-like The coordinates of the center of the source polygon : list or array-like The coordinates of a polygon bounding the region (a convex hull) bbox : list or array-like Boundaries of the source (with the lowest values for all axes followed by the highest values) area : scalar The area of the region """ from zope.cachedescriptors import property def __init__(self, coordinates, values=None, id=None): self.coordinates = asarray(coordinates) if self.coordinates.ndim == 1 and len(self.coordinates) > 0: self.coordinates = asarray([self.coordinates]) if values is not None: self.values = asarray(values) if self.values.ndim == 0: self.values = asarray([self.values]) if not (len(self.coordinates) == len(self.values)): raise ValueError("Lengths of coordinates %g and values %g do not match" % (len(self.coordinates), len(self.values))) if id is not None: self.id = id @property.Lazy def center(self): """ Find the 
region center using a mean. """ # TODO Add option to use weights return mean(self.coordinates, axis=0) @property.Lazy def polygon(self): """ Find the bounding polygon as a convex hull """ # TODO Add option for simplification from scipy.spatial import ConvexHull if len(self.coordinates) >= 4: inds = ConvexHull(self.coordinates).vertices return self.coordinates[inds] else: return self.coordinates @property.Lazy def bbox(self): """ Find the bounding box. """ mn = amin(self.coordinates, axis=0) mx = amax(self.coordinates, axis=0) return concatenate((mn, mx)) @property.Lazy def area(self): """ Find the region area. """ return len(self.coordinates) def restore(self, skip=None): """ Remove all lazy properties, will force recomputation """ if skip is None: skip = [] elif isinstance(skip, str): skip = [skip] for prop in LAZY_ATTRIBUTES: if prop in self.__dict__.keys() and prop not in skip: del self.__dict__[prop] return self def distance(self, other, method='euclidean'): """ Distance between the center of this source and another. Parameters ---------- other : Source, or array-like Either another source, or the center coordinates of another source method : str Specify a distance measure to used for spatial distance between source centers. Current options include Euclidean distance ('euclidean') and L1-norm ('l1'). """ from numpy.linalg import norm checkParams(method, ['euclidean', 'l1']) if method == 'l1': order = 1 else: order = 2 if isinstance(other, Source): return norm(self.center - other.center, ord=order) elif isinstance(other, list) or isinstance(other, ndarray): return norm(self.center - asarray(other), ord=order) def overlap(self, other, method='fraction'): """ Compute the overlap between this source and other. 
Options are a symmetric measure of overlap based on the fraction of intersecting pixels relative to the union ('fraction'), an assymmetric measure of overlap that expresses detected intersecting pixels (relative to this source) using precision and recall rates ('rates'), or a correlation coefficient of the weights within the intersection (not defined for binary weights) ('correlation') Parameters ---------- other : Source The source to compute overlap with. method : str Which estimate of overlap to compute, options are 'fraction' (symmetric) 'rates' (asymmetric) or 'correlation' """ checkParams(method, ['fraction', 'rates', 'correlation']) coordsSelf = aslist(self.coordinates) coordsOther = aslist(other.coordinates) intersection = [a for a in coordsSelf if a in coordsOther] nhit = float(len(intersection)) ntotal = float(len(set([tuple(x) for x in coordsSelf] + [tuple(x) for x in coordsOther]))) if method == 'rates': recall = nhit / len(coordsSelf) precision = nhit / len(coordsOther) return recall, precision if method == 'fraction': return nhit / float(ntotal) if method == 'correlation': from scipy.stats import spearmanr if not (hasattr(self, 'values') and hasattr(other, 'values')): raise ValueError('Sources must have values to compute correlation') else: valuesSelf = aslist(self.values) valuesOther = aslist(other.values) if len(intersection) > 0: left = [v for v, c in zip(valuesSelf, coordsSelf) if c in coordsOther] right = [v for v, c in zip(valuesOther, coordsOther) if c in coordsSelf] rho, _ = spearmanr(left, right) else: rho = 0.0 return rho def merge(self, other): """ Combine this source with other """ self.coordinates = concatenate((self.coordinates, other.coordinates)) if hasattr(self, 'values'): self.values = concatenate((self.values, other.values)) return self def tolist(self): """ Convert array-like attributes to list """ import copy new = copy.copy(self) for prop in ["coordinates", "values", "center", "bbox", "polygon"]: if prop in self.__dict__.keys(): 
val = new.__getattribute__(prop) if val is not None and not isinstance(val, list): setattr(new, prop, val.tolist()) return new def toarray(self): """ Convert array-like attributes to ndarray """ import copy new = copy.copy(self) for prop in ["coordinates", "values", "center", "bbox", "polygon"]: if prop in self.__dict__.keys(): val = new.__getattribute__(prop) if val is not None and not isinstance(val, ndarray): setattr(new, prop, asarray(val)) return new def crop(self, minBound, maxBound): """ Crop a source by removing coordinates outside bounds. Follows normal slice indexing conventions. Parameters ---------- minBound : tuple Minimum or starting bounds for each axis maxBound : tuple Maximum or ending bounds for each axis """ coords = self.coordinates newid = self.id if hasattr(self, 'id') else None if hasattr(self, 'values') and self.values is not None: values = self.values inside = [(c, v) for c, v in zip(coords, values) if c not in coords] newcoords, newvalues = zip(*inside) return Source(coordinates=newcoords, values=newvalues, id=newid) else: newcoords = [c for c in coords if all(c >= minBound) and all(c < maxBound)] return Source(coordinates=newcoords, id=newid) def dilate(self, size): """ Dilate a source using morphological operators. 
Parameters ---------- size : int Size of dilation in pixels """ if size == 0: newcoords = self.coordinates else: size = (size * 2) + 1 if hasattr(self, 'values') and self.values is not None: raise AttributeError('Cannot dilate sources with values') from skimage.morphology import binary_dilation coords = self.coordinates extent = self.bbox[len(self.center):] - self.bbox[0:len(self.center)] + 1 + size * 2 m = zeros(extent) coords = (coords - self.bbox[0:len(self.center)] + size) m[coords.T.tolist()] = 1 m = binary_dilation(m, ones((size, size))) newcoords = asarray(where(m)).T + self.bbox[0:len(self.center)] - size newcoords = [c for c in newcoords if all(c >= 0)] newid = self.id if hasattr(self, 'id') else None return Source(coordinates=newcoords, id=newid) def exclude(self, other): """ Remove coordinates derived from another Source or an array. If other is an array, will remove coordinates of all non-zero elements from this source. If other is a source, will remove any matching coordinates. 
Parameters ---------- other : ndarray or Source Source to remove """ if isinstance(other, ndarray): coordsOther = asarray(where(other)).T else: coordsOther = aslist(other.coordinates) coordsSelf = aslist(self.coordinates) newid = self.id if hasattr(self, 'id') else None if hasattr(self, 'values') and self.values is not None: valuesSelf = self.values complement = [(c, v) for c, v in zip(coordsSelf, valuesSelf) if c not in coordsOther] newcoords, newvalues = zip(*complement) return Source(coordinates=newcoords, values=newvalues, id=newid) else: complement = [a for a in coordsSelf if a not in coordsOther] return Source(coordinates=complement, id=newid) def outline(self, inner, outer): """ Compute source outline by differencing two dilations Parameters ---------- inner : int Size of inner outline boundary (in pixels) outer : int Size of outer outline boundary (in pixels) """ return self.dilate(outer).exclude(self.dilate(inner)) def transform(self, data, collect=True): """ Extract series from data using a list of sources. Currently only supports averaging over coordinates. Params ------ data : Images or Series object The data from which to extract collect : boolean, optional, default = True Whether to collect to local array or keep as a Series """ if not (isinstance(data, Images) or isinstance(data, Series)): raise Exception("Input must either be Images or Series (or a subclass)") # TODO add support for weighting if isinstance(data, Images): output = data.meanByRegions([self.coordinates]).toSeries() else: output = data.meanOfRegion(self.coordinates) if collect: return output.collectValuesAsArray() else: return output def mask(self, dims=None, binary=True, outline=False, color=None): """ Construct a mask from a source, either locally or within a larger image. Parameters ---------- dims : list or tuple, optional, default = None Dimensions of large image in which to draw mask. If none, will restrict to the bounding box of the region. 
binary : boolean, optional, deafult = True Whether to incoporate values or only show a binary mask outline : boolean, optional, deafult = False Whether to only show outlines (derived using binary dilation) color : str or array-like RGB triplet (from 0 to 1) or named color (e.g. 'red', 'blue') """ from thunder import Colorize coords = self.coordinates if dims is None: extent = self.bbox[len(self.center):] - self.bbox[0:len(self.center)] + 1 m = zeros(extent) coords = (coords - self.bbox[0:len(self.center)]) else: m = zeros(dims) if hasattr(self, 'values') and self.values is not None and binary is False: m[coords.T.tolist()] = self.values else: m[coords.T.tolist()] = 1 if outline: from skimage.morphology import binary_dilation m = binary_dilation(m, ones((3, 3))) - m if color is not None: m = Colorize(cmap='indexed', colors=[color]).transform([m]) return m def inbounds(self, minBound, maxBound): """ Check what fraction of coordinates are inside given bounds Parameters ---------- minBound : list or tuple Minimum bounds maxBounds : list or tuple Maximum bounds """ minCheck = sum(self.coordinates < minBound, axis=1) > 0 maxCheck = sum(self.coordinates > maxBound, axis=1) > 0 fraction = 1 - sum(logical_or(minCheck, maxCheck)) / float(len(self.coordinates)) return fraction @staticmethod def fromMask(mask, id=None): """ Genearte a source from a mask. Assumes that the mask is an image where all non-zero elements are part of the source. If all non-zero elements are 1, then values will be ignored as the source is assumed to be binary. 
Parameters ---------- mask : array-like An array (typically 2D or 3D) containing the image mask id : int or string Arbitrary identifier for the source, typically an int or string """ mask = asarray(mask) u = unique(mask) if len(u) == 2 and u[0] == 0 and u[1] == 1: inds = where(mask) return Source(coordinates=asarray(zip(*inds)), id=id) else: inds = where(mask) values = mask[inds] coords = asarray(zip(*inds)) return Source(coordinates=coords, values=values, id=id) @staticmethod def fromCoordinates(coordinates, values=None, id=None): """ Generate a source from a list of coordinates and values. Parameters ---------- coordinates : array-like List coordinates as a list of lists or array of shape (n,2) or (n,3) values : list or array-like Value (or weight) associated with each coordiante id : int or string Arbitrary specification per source, typically an index or string label """ return Source(coordinates, values, id) def __repr__(self): s = self.__class__.__name__ for opt in ["id", "center", "bbox"]: if hasattr(self, opt): o = self.__getattribute__(opt) os = o.tolist() if isinstance(o, ndarray) else o s += '\n%s: %s' % (opt, repr(os)) return s class SourceModel(Serializable, object): """ A source model as a collection of extracted sources. 
    Parameters
    ----------
    sources : list or Sources or a single Source
        The identified sources

    See also
    --------
    Source
    """
    def __init__(self, sources):
        # Accept a single Source, a list of Sources, or a list of raw
        # coordinate lists (each converted into a Source).
        if isinstance(sources, Source):
            self.sources = [sources]
        elif isinstance(sources, list) and isinstance(sources[0], Source):
            self.sources = sources
        elif isinstance(sources, list):
            self.sources = []
            for ss in sources:
                self.sources.append(Source(ss))
        else:
            raise Exception("Input type not recognized, must be Source, list of Sources, "
                            "or list of coordinates, got %s" % type(sources))

    def __getitem__(self, entry):
        # Only integer indexing is supported (no slices).
        if not isinstance(entry, int):
            raise IndexError("Selection not recognized, must be Int, got %s" % type(entry))
        return self.sources[entry]

    def combiner(self, prop, tolist=True):
        # Gather attribute `prop` from every source; optionally convert each
        # (array-valued) attribute to a plain list.
        combined = []
        for s in self.sources:
            p = getattr(s, prop)
            if tolist:
                p = p.tolist()
            combined.append(p)
        return combined

    @property
    def coordinates(self):
        """
        List of coordinates combined across sources
        """
        return self.combiner('coordinates')

    @property
    def values(self):
        """
        List of values combined across sources
        """
        return self.combiner('values')

    @property
    def centers(self):
        """
        Array of centers combined across sources
        """
        return asarray(self.combiner('center'))

    @property
    def polygons(self):
        """
        List of polygons combined across sources
        """
        return self.combiner('polygon')

    @property
    def areas(self):
        """
        List of areas combined across sources
        """
        return self.combiner('area', tolist=False)

    @property
    def count(self):
        """
        Number of sources
        """
        return len(self.sources)

    def masks(self, dims=None, binary=True, outline=False, base=None, color=None, values=None, inds=None):
        """
        Composite masks combined across sources as an image.

        Parameters
        ----------
        dims : list or tuple, optional, default = None
            Dimensions of image in which to create masks, must either provide
            these or provide a base image

        binary : boolean, optional, deafult = True
            Whether to incoporate values or only show a binary mask

        outline : boolean, optional, deafult = False
            Whether to only show outlines (derived using binary dilation)

        base : SourceModel or array-like, optional, deafult = None
            Base background image on which to put masks,
            or another set of sources (usually for comparisons).

        color : str or LinearSegmentedColormap, optional, deafult = None
            Color to assign regions or colormap, will used named colormap
            or use the provided colormap, or assign randomly if 'random'

        values : array-like
            List of values to use with colormap

        inds : array-like, optional, deafult = None
            List of indices if only showing a subset
        """
        from thunder import Colorize
        from matplotlib.cm import get_cmap
        from matplotlib.colors import LinearSegmentedColormap

        if inds is None:
            inds = range(0, self.count)

        if dims is None and base is None:
            raise Exception("Must provide image dimensions for composite masks "
                            "or provide a base image.")

        # comparing against another SourceModel forces outline mode
        if base is not None and isinstance(base, SourceModel):
            outline = True

        if dims is None and base is not None:
            dims = asarray(base).shape

        # render the base: other sources in silver, or a raw image in white
        if isinstance(base, SourceModel):
            base = base.masks(dims, color='silver')
        elif isinstance(base, ndarray):
            base = Colorize(cmap='indexed', colors=['white']).transform([base])

        if base is not None and color is None:
            color = 'deeppink'

        # NOTE(review): the branch structure below leaves `combined` unset when
        # `values` is provided but `color` is None and no base was given —
        # a NameError would follow; verify intended usage upstream.

        # colormap object + values: map each source's value through the colormap
        if isinstance(color, LinearSegmentedColormap) and values is not None:
            combined = zeros(list(dims) + [3])
            colors = color(values)[:, 0:3]
            for i in inds:
                combined = maximum(self.sources[i].mask(dims, binary, outline, colors[i]), combined)

        # named colormap + values
        if isinstance(color, str) and values is not None and not color == 'random':
            combined = zeros(list(dims) + [3])
            colors = get_cmap(color, self.count)(values)[:, 0:3]
            for i in inds:
                combined = maximum(self.sources[i].mask(dims, binary, outline, colors[i]), combined)

        # random color assignment, cycling through up to 20 rainbow colors
        if color == 'random':
            combined = zeros(list(dims) + [3])
            ncolors = min(self.count, 20)
            colors = get_cmap('rainbow', ncolors)(range(0, ncolors, 1))[:, 0:3]
            for i in inds:
                combined = maximum(self.sources[i].mask(dims, binary, outline, colors[i % len(colors)]), combined)

        # no values: plain (possibly binary) composite; note this `elif`
        # chains to the `if color == 'random'` test directly above
        elif values is None:
            combined = zeros(dims)
            for i in inds:
                combined = maximum(self.sources[i].mask(dims, binary, outline), combined)

        # single named color over the value-less composite
        if isinstance(color, str) and color != 'random' and values is None:
            combined = Colorize(cmap='indexed', colors=[color]).transform([combined])

        if base is not None:
            combined = maximum(base, combined)

        return combined

    def match(self, other, unique=False, minDistance=inf):
        """
        For each source in self, find the index of the closest source in other.

        Uses euclidean distances between centers to determine distances.

        Can select nearest matches with or without enforcing uniqueness;
        if unique is False, will return the closest source in other for
        each source in self, possibly repeating sources multiple times
        if unique is True, will only allow each source in other to be matched
        with a single source in self, as determined by a greedy selection procedure.
        The minDistance parameter can be used to prevent far-away sources from being
        chosen during greedy selection.

        Params
        ------
        other : SourceModel
            The source model to match sources to

        unique : boolean, optional, deafult = True
            Whether to only return unique matches

        minDistance : scalar, optiona, default = inf
            Minimum distance to use when selecting matches
        """
        from scipy.spatial.distance import cdist

        targets = other.centers
        targetInds = range(0, len(targets))
        matches = []
        for s in self.sources:
            update = 1

            # skip if no targets left, otherwise update
            if len(targets) == 0:
                update = 0
            else:
                dists = cdist(targets, s.center[newaxis])
                if dists.min() < minDistance:
                    ind = argmin(dists)
                else:
                    update = 0

            # apply updates, otherwise add a nan
            if update == 1:
                matches.append(targetInds[ind])
                if unique is True:
                    # greedy: remove the chosen target so it cannot match again
                    targets = delete(targets, ind, axis=0)
                    targetInds = delete(targetInds, ind)
            else:
                matches.append(NaN)

        return matches

    def distance(self, other, minDistance=inf):
        """
        Compute the distance between each source in self and other.

        First estimates a matching source from other for each source
        in self, then computes the distance between the two sources.
        The matches are unique, using a greedy procedure,
        and minDistance can be used to prevent outliers during matching.

        Parameters
        ----------
        other : SourceModel
            The sources to compute distances to

        minDistance : scalar, optiona, default = inf
            Minimum distance to use when matching indices
        """
        inds = self.match(other, unique=True, minDistance=minDistance)
        d = []
        for jj, ii in enumerate(inds):
            # `is not NaN` relies on match() appending the same NaN object
            if ii is not NaN:
                d.append(self[jj].distance(other[ii]))
            else:
                d.append(NaN)
        return asarray(d)

    def overlap(self, other, method='fraction', minDistance=inf):
        """
        Estimate overlap between sources in self and other.

        Will compute the similarity of sources in self that are found
        in other, based on either source pixel overlap or correlation.

        Parameters
        ----------
        other : SourceModel
            The sources to compare to

        method : str, optional, default = 'fraction"
            Method to use when computing overlap between sources
            ('fraction', 'rates', or 'correlation')

        minDistance : scalar, optional, default = inf
            Minimum distance to use when matching indices
        """
        inds = self.match(other, unique=True, minDistance=minDistance)
        d = []
        for jj, ii in enumerate(inds):
            if ii is not NaN:
                d.append(self[jj].overlap(other[ii], method=method))
            else:
                # 'rates' returns a (recall, precision) pair per source
                if method == 'rates':
                    d.append((NaN, NaN))
                else:
                    d.append(NaN)
        return asarray(d)

    def similarity(self, other, metric='distance', thresh=5, minDistance=inf):
        """
        Estimate similarity to another set of sources using recall and precision.

        Will compute the number of sources in self that are also
        in other, based on a given distance metric and a threshold.
        The recall rate is the number of matches divided by the number in self,
        and the precision rate is the number of matches divided by the number in other.
        Typically self is ground truth and other is an estimate.
        The F score is defined as 2 * (recall * precision) / (recall + precision)

        Before computing metrics, all sources in self are matched to other,
        and a minimum distance can be set to control matching.

        Parameters
        ----------
        other : SourceModel
            The sources to compare to.

        metric : str, optional, default = 'distance'
            Metric to use when computing distances,
            options include 'distance' and 'overlap'

        thresh : scalar, optional, default = 5
            The distance below which a source is considered found.

        minDistance : scalar, optional, default = inf
            Minimum distance to use when matching indices.
        """
        checkParams(metric, ['distance', 'overlap'])

        if metric == 'distance':
            # when evaluating distances,
            # minimum distance should be the threshold
            if minDistance == inf:
                minDistance = thresh
            vals = self.distance(other, minDistance=minDistance)
            vals[isnan(vals)] = inf
            compare = lambda x: x < thresh
        elif metric == 'overlap':
            vals = self.overlap(other, method='fraction', minDistance=minDistance)
            vals[isnan(vals)] = 0
            compare = lambda x: x > thresh
        else:
            raise Exception("Metric not recognized")

        recall = sum(map(compare, vals)) / float(self.count)
        precision = sum(map(compare, vals)) / float(other.count)
        score = 2 * (recall * precision) / (recall + precision)

        return recall, precision, score

    def transform(self, data, collect=True):
        """
        Extract series from data using a list of sources.

        Currently only supports simple averaging over coordinates.

        Params
        ------
        data : Images or Series object
            The data from which to extract signals

        collect : boolean, optional, default = True
            Whether to collect to local array or keep as a Series
        """
        if not (isinstance(data, Images) or isinstance(data, Series)):
            raise Exception("Input must either be Images or Series (or a subclass)")

        # TODO add support for weighting
        if isinstance(data, Images):
            output = data.meanByRegions(self.coordinates).toSeries()
        else:
            output = data.meanByRegions(self.coordinates)

        if collect:
            return output.collectValuesAsArray()
        else:
            return output

    def clean(self, cleaners=None):
        """
        Apply one or more cleaners to sources, returning filtered sources

        Parameters
        ----------
        cleaners : Cleaner or list of Cleaners, optional, default = None
            Which cleaners to apply, if None, will apply BasicCleaner with defaults
        """
        from thunder.extraction.cleaners import Cleaner, BasicCleaner
        from copy import copy

        if isinstance(cleaners, list):
            for c in cleaners:
                if not isinstance(c, Cleaner):
                    raise Exception("List must only contain Cleaners")
        elif isinstance(cleaners, Cleaner):
            cleaners = [cleaners]
        elif cleaners is None:
            cleaners = [BasicCleaner()]
        else:
            raise Exception("Must provide Cleaner or list of Cleaners, got %s" % type(cleaners))

        # apply cleaners in sequence, each producing a new filtered model
        newmodel = copy(self)
        for c in cleaners:
            newmodel = c.clean(newmodel)

        return newmodel

    def dilate(self, size):
        """
        Dilate all sources using morphological operators

        Parameters
        ----------
        size : int
            Size of dilation in pixels
        """
        return SourceModel([s.dilate(size) for s in self.sources])

    def outline(self, inner, outer):
        """
        Outline all sources

        inner : int
            Size of inner outline boundary (in pixels)

        outer : int
            Size of outer outline boundary (in pixels)
        """
        return SourceModel([s.outline(inner, outer) for s in self.sources])

    def crop(self, minBound, maxBound):
        """
        Crop all sources by removing coordinates outside of bounds

        Parameters
        ----------
        minBound : tuple
            Minimum or starting bounds for each axis

        maxBound : tuple
            Maximum or ending bounds for each axis
        """
        return SourceModel([s.crop(minBound, maxBound) for s in self.sources])

    def save(self, f, include=None, overwrite=False, **kwargs):
        """
        Custom save to file with simplified, human-readable output,
        and selection of lazy attributes.
        """
        import copy
        output = copy.deepcopy(self)
        if isinstance(include, str):
            include = [include]
        if include is not None:
            # force computation of the requested lazy attributes before saving
            for prop in include:
                map(lambda s: getattr(s, prop), output.sources)
        # drop all other lazy attributes and convert arrays to lists
        output.sources = map(lambda s: s.restore(include).tolist(), output.sources)
        simplify = lambda d: d['sources']['py/homogeneousList']['data']
        super(SourceModel, output).save(f, simplify=simplify, overwrite=overwrite, **kwargs)

    @classmethod
    def load(cls, f, **kwargs):
        """
        Custom load from file to handle simplified, human-readable output
        """
        # re-wrap the simplified JSON into the structure Serializable expects
        unsimplify = lambda d: {'sources': {
            'py/homogeneousList': {'data': d, 'module': 'thunder.extraction.source', 'type': 'Source'}}}
        output = super(SourceModel, cls).load(f, unsimplify=unsimplify)
        output.sources = map(lambda s: s.toarray(), output.sources)
        return output

    @classmethod
    def deserialize(cls, d, **kwargs):
        """
        Custom load from JSON to handle simplified, human-readable output
        """
        unsimplify = lambda d: {'sources': {
            'py/homogeneousList': {'data': d, 'module': 'thunder.extraction.source', 'type': 'Source'}}}
        output = super(SourceModel, cls).deserialize(d, unsimplify=unsimplify)
        output.sources = map(lambda s: s.toarray(), output.sources)
        return output

    def __repr__(self):
        s = self.__class__.__name__
        s += '\n%g sources' % (len(self.sources))
        return s


# Names of the lazily computed Source attributes cleared by Source.restore()
LAZY_ATTRIBUTES = ["center", "polygon", "bbox", "area"]
# -*- coding: utf-8 -*-
"""
    werkzeug.serving
    ~~~~~~~~~~~~~~~~

    There are many ways to serve a WSGI application.  While you're developing
    it you usually don't want a full blown webserver like Apache but a simple
    standalone one.  From Python 2.5 onwards there is the `wsgiref`_ server in
    the standard library.  If you're using older versions of Python you can
    download the package from the cheeseshop.

    However there are some caveats.  Source code won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
    `KeyboardInterrupt` error.  While the latter is easy to solve the first
    one can be a pain in the ass in some situations.

    The easiest way is creating a small ``start-myproject.py`` that runs the
    application::

        #!/usr/bin/env python
        # -*- coding: utf-8 -*-
        from myproject import make_app
        from werkzeug.serving import run_simple

        app = make_app(...)
        run_simple('localhost', 8080, app, use_reloader=True)

    You can also pass it an `extra_files` keyword argument with a list of
    additional files (like configuration files) you want to observe.

    For bigger applications you should consider using `werkzeug.script`
    instead of a simple start file.

    :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
""" from __future__ import with_statement import os import socket import sys import signal from ._compat import PY2 try: import ssl except ImportError: class _SslDummy(object): def __getattr__(self, name): raise RuntimeError('SSL support unavailable') ssl = _SslDummy() def _get_openssl_crypto_module(): try: from OpenSSL import crypto except ImportError: raise TypeError('Using ad-hoc certificates requires the pyOpenSSL ' 'library.') else: return crypto try: from SocketServer import ThreadingMixIn, ForkingMixIn from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler except ImportError: from socketserver import ThreadingMixIn, ForkingMixIn from http.server import HTTPServer, BaseHTTPRequestHandler import werkzeug from werkzeug._internal import _log from werkzeug._compat import reraise, wsgi_encoding_dance from werkzeug.urls import url_parse, url_unquote from werkzeug.exceptions import InternalServerError LISTEN_QUEUE = 128 can_open_by_fd = hasattr(socket, 'fromfd') class WSGIRequestHandler(BaseHTTPRequestHandler, object): """A request handler that implements WSGI dispatching.""" @property def server_version(self): return 'Werkzeug/' + werkzeug.__version__ def make_environ(self): request_url = url_parse(self.path) def shutdown_server(): self.server.shutdown_signal = True url_scheme = self.server.ssl_context is None and 'http' or 'https' path_info = url_unquote(request_url.path) environ = { 'wsgi.version': (1, 0), 'wsgi.url_scheme': url_scheme, 'wsgi.input': self.rfile, 'wsgi.errors': sys.stderr, 'wsgi.multithread': self.server.multithread, 'wsgi.multiprocess': self.server.multiprocess, 'wsgi.run_once': False, 'werkzeug.server.shutdown': shutdown_server, 'SERVER_SOFTWARE': self.server_version, 'REQUEST_METHOD': self.command, 'SCRIPT_NAME': '', 'PATH_INFO': wsgi_encoding_dance(path_info), 'QUERY_STRING': wsgi_encoding_dance(request_url.query), 'CONTENT_TYPE': self.headers.get('Content-Type', ''), 'CONTENT_LENGTH': self.headers.get('Content-Length', ''), 
'REMOTE_ADDR': self.client_address[0], 'REMOTE_PORT': self.client_address[1], 'SERVER_NAME': self.server.server_address[0], 'SERVER_PORT': str(self.server.server_address[1]), 'SERVER_PROTOCOL': self.request_version } for key, value in self.headers.items(): key = 'HTTP_' + key.upper().replace('-', '_') if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): environ[key] = value if request_url.netloc: environ['HTTP_HOST'] = request_url.netloc return environ def run_wsgi(self): if self.headers.get('Expect', '').lower().strip() == '100-continue': self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n') self.environ = environ = self.make_environ() headers_set = [] headers_sent = [] def write(data): assert headers_set, 'write() before start_response' if not headers_sent: status, response_headers = headers_sent[:] = headers_set try: code, msg = status.split(None, 1) except ValueError: code, msg = status, "" self.send_response(int(code), msg) header_keys = set() for key, value in response_headers: self.send_header(key, value) key = key.lower() header_keys.add(key) if 'content-length' not in header_keys: self.close_connection = True self.send_header('Connection', 'close') if 'server' not in header_keys: self.send_header('Server', self.version_string()) if 'date' not in header_keys: self.send_header('Date', self.date_time_string()) self.end_headers() assert isinstance(data, bytes), 'applications must write bytes' self.wfile.write(data) self.wfile.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: reraise(*exc_info) finally: exc_info = None elif headers_set: raise AssertionError('Headers already set') headers_set[:] = [status, response_headers] return write def execute(app): application_iter = app(environ, start_response) try: for data in application_iter: write(data) if not headers_sent: write(b'') finally: if hasattr(application_iter, 'close'): application_iter.close() application_iter = None try: execute(self.server.app) 
except (socket.error, socket.timeout) as e: self.connection_dropped(e, environ) except Exception: if self.server.passthrough_errors: raise from werkzeug.debug.tbtools import get_current_traceback traceback = get_current_traceback(ignore_system_exceptions=True) try: # if we haven't yet sent the headers but they are set # we roll back to be able to set them again. if not headers_sent: del headers_set[:] execute(InternalServerError()) except Exception: pass self.server.log('error', 'Error on request:\n%s', traceback.plaintext) def handle(self): """Handles a request ignoring dropped connections.""" rv = None try: rv = BaseHTTPRequestHandler.handle(self) except (socket.error, socket.timeout) as e: self.connection_dropped(e) except Exception: if self.server.ssl_context is None or not is_ssl_error(): raise if self.server.shutdown_signal: self.initiate_shutdown() return rv def initiate_shutdown(self): """A horrible, horrible way to kill the server for Python 2.6 and later. It's the best we can do. """ # Windows does not provide SIGKILL, go with SIGTERM then. sig = getattr(signal, 'SIGKILL', signal.SIGTERM) # reloader active if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': os.kill(os.getpid(), sig) # python 2.7 self.server._BaseServer__shutdown_request = True # python 2.6 self.server._BaseServer__serving = False def connection_dropped(self, error, environ=None): """Called if the connection was closed by the client. By default nothing happens. 
""" def handle_one_request(self): """Handle a single HTTP request.""" self.raw_requestline = self.rfile.readline() if not self.raw_requestline: self.close_connection = 1 elif self.parse_request(): return self.run_wsgi() def send_response(self, code, message=None): """Send the response header and log the response code.""" self.log_request(code) if message is None: message = code in self.responses and self.responses[code][0] or '' if self.request_version != 'HTTP/0.9': hdr = "%s %d %s\r\n" % (self.protocol_version, code, message) self.wfile.write(hdr.encode('ascii')) def version_string(self): return BaseHTTPRequestHandler.version_string(self).strip() def address_string(self): return self.environ['REMOTE_ADDR'] def log_request(self, code='-', size='-'): self.log('info', '"%s" %s %s', self.requestline, code, size) def log_error(self, *args): self.log('error', *args) def log_message(self, format, *args): self.log('info', format, *args) def log(self, type, message, *args): _log(type, '%s - - [%s] %s\n' % (self.address_string(), self.log_date_time_string(), message % args)) #: backwards compatible name if someone is subclassing it BaseRequestHandler = WSGIRequestHandler def generate_adhoc_ssl_pair(cn=None): from random import random crypto = _get_openssl_crypto_module() # pretty damn sure that this is not actually accepted by anyone if cn is None: cn = '*' cert = crypto.X509() cert.set_serial_number(int(random() * sys.maxsize)) cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(60 * 60 * 24 * 365) subject = cert.get_subject() subject.CN = cn subject.O = 'Dummy Certificate' issuer = cert.get_issuer() issuer.CN = 'Untrusted Authority' issuer.O = 'Self-Signed' pkey = crypto.PKey() pkey.generate_key(crypto.TYPE_RSA, 1024) cert.set_pubkey(pkey) cert.sign(pkey, 'md5') return cert, pkey def make_ssl_devcert(base_path, host=None, cn=None): """Creates an SSL key for development. This should be used instead of the ``'adhoc'`` key which generates a new cert on each server start. 
It accepts a path for where it should store the key and cert and either a host or CN. If a host is given it will use the CN ``*.host/CN=host``. For more information see :func:`run_simple`. .. versionadded:: 0.9 :param base_path: the path to the certificate and key. The extension ``.crt`` is added for the certificate, ``.key`` is added for the key. :param host: the name of the host. This can be used as an alternative for the `cn`. :param cn: the `CN` to use. """ from OpenSSL import crypto if host is not None: cn = '*.%s/CN=%s' % (host, host) cert, pkey = generate_adhoc_ssl_pair(cn=cn) cert_file = base_path + '.crt' pkey_file = base_path + '.key' with open(cert_file, 'wb') as f: f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) with open(pkey_file, 'wb') as f: f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) return cert_file, pkey_file def generate_adhoc_ssl_context(): """Generates an adhoc SSL context for the development server.""" crypto = _get_openssl_crypto_module() import tempfile import atexit cert, pkey = generate_adhoc_ssl_pair() cert_handle, cert_file = tempfile.mkstemp() pkey_handle, pkey_file = tempfile.mkstemp() atexit.register(os.remove, pkey_file) atexit.register(os.remove, cert_file) os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) os.close(cert_handle) os.close(pkey_handle) ctx = load_ssl_context(cert_file, pkey_file) return ctx def load_ssl_context(cert_file, pkey_file=None, protocol=None): """Loads SSL context from cert/private key files and optional protocol. Many parameters are directly taken from the API of :py:class:`ssl.SSLContext`. :param cert_file: Path of the certificate to use. :param pkey_file: Path of the private key to use. If not given, the key will be obtained from the certificate file. :param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl`` module. Defaults to ``PROTOCOL_SSLv23``. 
""" if protocol is None: protocol = ssl.PROTOCOL_SSLv23 ctx = _SSLContext(protocol) ctx.load_cert_chain(cert_file, pkey_file) return ctx class _SSLContext(object): '''A dummy class with a small subset of Python3's ``ssl.SSLContext``, only intended to be used with and by Werkzeug.''' def __init__(self, protocol): self._protocol = protocol self._certfile = None self._keyfile = None self._password = None def load_cert_chain(self, certfile, keyfile=None, password=None): self._certfile = certfile self._keyfile = keyfile or certfile self._password = password def wrap_socket(self, sock, **kwargs): # If we are on Python 2 the return value from socket.fromfd # is an internal socket object but what we need for ssl wrap # is the wrapper around it :( if PY2 and not isinstance(sock, socket.socket): sock = socket.socket(sock.family, sock.type, sock.proto, sock) return ssl.wrap_socket(sock, keyfile=self._keyfile, certfile=self._certfile, ssl_version=self._protocol, **kwargs) def is_ssl_error(error=None): """Checks if the given error (or the current one) is an SSL error.""" exc_types = (ssl.SSLError,) try: from OpenSSL.SSL import Error exc_types += (Error,) except ImportError: pass if error is None: error = sys.exc_info()[1] return isinstance(error, exc_types) def select_ip_version(host, port): """Returns AF_INET4 or AF_INET6 depending on where to connect to.""" # disabled due to problems with current ipv6 implementations # and various operating systems. Probably this code also is # not supposed to work, but I can't come up with any other # ways to implement this. 
# try: # info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, # socket.SOCK_STREAM, 0, # socket.AI_PASSIVE) # if info: # return info[0][0] # except socket.gaierror: # pass if ':' in host and hasattr(socket, 'AF_INET6'): return socket.AF_INET6 return socket.AF_INET class BaseWSGIServer(HTTPServer, object): """Simple single-threaded, single-process WSGI server.""" multithread = False multiprocess = False request_queue_size = LISTEN_QUEUE def __init__(self, host, port, app, handler=None, passthrough_errors=False, ssl_context=None, fd=None): if handler is None: handler = WSGIRequestHandler self.address_family = select_ip_version(host, port) if fd is not None: real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM) port = 0 HTTPServer.__init__(self, (host, int(port)), handler) self.app = app self.passthrough_errors = passthrough_errors self.shutdown_signal = False self.host = host self.port = port # Patch in the original socket. if fd is not None: self.socket.close() self.socket = real_sock self.server_address = self.socket.getsockname() if ssl_context is not None: if isinstance(ssl_context, tuple): ssl_context = load_ssl_context(*ssl_context) if ssl_context == 'adhoc': ssl_context = generate_adhoc_ssl_context() self.socket = ssl_context.wrap_socket(self.socket, server_side=True) self.ssl_context = ssl_context else: self.ssl_context = None def log(self, type, message, *args): _log(type, message, *args) def serve_forever(self): self.shutdown_signal = False try: if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': display_hostname = self.host != '*' and self.host or 'localhost' if ':' in display_hostname: display_hostname = '[%s]' % display_hostname quit_msg = '(Press CTRL+C to quit)' _log('info', ' * Running on %s://%s:%d/ %s', self.ssl_context is None and 'http' or 'https', display_hostname, self.port, quit_msg) HTTPServer.serve_forever(self) except KeyboardInterrupt: pass finally: self.server_close() def handle_error(self, request, client_address): if 
self.passthrough_errors:
            # Re-raise so attached debuggers (pdb etc.) see the error.
            raise
        else:
            return HTTPServer.handle_error(self, request, client_address)

    def get_request(self):
        con, info = self.socket.accept()
        return con, info


class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
    """A WSGI server that does threading."""
    multithread = True


class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
    """A WSGI server that does forking."""
    multiprocess = True

    def __init__(self, host, port, app, processes=40, handler=None,
                 passthrough_errors=False, ssl_context=None, fd=None):
        BaseWSGIServer.__init__(self, host, port, app, handler,
                                passthrough_errors, ssl_context, fd)
        # ForkingMixIn reads `max_children` to cap concurrent child processes.
        self.max_children = processes


def make_server(host=None, port=None, app=None, threaded=False, processes=1,
                request_handler=None, passthrough_errors=False,
                ssl_context=None, fd=None):
    """Create a new server instance that is either threaded, or forks
    or just processes one request after another.

    ``threaded`` and ``processes > 1`` are mutually exclusive.
    """
    if threaded and processes > 1:
        raise ValueError("cannot have a multithreaded and "
                         "multi process server.")
    elif threaded:
        return ThreadedWSGIServer(host, port, app, request_handler,
                                  passthrough_errors, ssl_context, fd=fd)
    elif processes > 1:
        return ForkingWSGIServer(host, port, app, processes, request_handler,
                                 passthrough_errors, ssl_context, fd=fd)
    else:
        return BaseWSGIServer(host, port, app, request_handler,
                              passthrough_errors, ssl_context, fd=fd)


def is_running_from_reloader():
    """Checks if the application is running from within the Werkzeug
    reloader subprocess.

    .. versionadded:: 0.10
    """
    # The reloader sets this variable in the subprocess it spawns (see the
    # WERKZEUG_RUN_MAIN checks elsewhere in this module).
    return os.environ.get('WERKZEUG_RUN_MAIN') == 'true'


def run_simple(hostname, port, application, use_reloader=False,
               use_debugger=False, use_evalex=True,
               extra_files=None, reloader_interval=1,
               reloader_type='auto', threaded=False,
               processes=1, request_handler=None, static_files=None,
               passthrough_errors=False, ssl_context=None):
    """Start a WSGI application. Optional features include a reloader,
    multithreading and fork support.
    This function has a command-line interface too::

        python -m werkzeug.serving --help

    .. versionadded:: 0.5
       `static_files` was added to simplify serving of static files as well
       as `passthrough_errors`.

    .. versionadded:: 0.6
       support for SSL was added.

    .. versionadded:: 0.8
       Added support for automatically loading an SSL context from a
       certificate file and private key.

    .. versionadded:: 0.9
       Added command-line interface.

    .. versionadded:: 0.10
       Improved the reloader and added support for changing the backend
       through the `reloader_type` parameter.  See :ref:`reloader` for
       more information.

    :param hostname: The host for the application.  eg: ``'localhost'``
    :param port: The port for the server.  eg: ``8080``
    :param application: the WSGI application to execute
    :param use_reloader: should the server automatically restart the python
                         process if modules were changed?
    :param use_debugger: should the werkzeug debugging system be used?
    :param use_evalex: should the exception evaluation feature be enabled?
    :param extra_files: a list of files the reloader should watch in addition
                        to the modules.  For example configuration files.
    :param reloader_interval: the interval for the reloader in seconds.
    :param reloader_type: the type of reloader to use.  The default is
                          auto detection.  Valid values are ``'stat'`` and
                          ``'watchdog'``. See :ref:`reloader` for more
                          information.
    :param threaded: should the process handle each request in a separate
                     thread?
    :param processes: if greater than 1 then handle each request in a new
                      process up to this maximum number of concurrent
                      processes.
    :param request_handler: optional parameter that can be used to replace
                            the default one.  You can use this to replace it
                            with a different
                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
                            subclass.
    :param static_files: a dict of paths for static files.  This works exactly
                         like :class:`SharedDataMiddleware`, it's actually
                         just wrapping the application in that middleware
                         before serving.
:param passthrough_errors: set this to `True` to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.) :param ssl_context: an SSL context for the connection. Either an :class:`ssl.SSLContext`, a tuple in the form ``(cert_file, pkey_file)``, the string ``'adhoc'`` if the server should automatically create one, or ``None`` to disable SSL (which is the default). """ if use_debugger: from werkzeug.debug import DebuggedApplication application = DebuggedApplication(application, use_evalex) if static_files: from werkzeug.wsgi import SharedDataMiddleware application = SharedDataMiddleware(application, static_files) def inner(): try: fd = int(os.environ['WERKZEUG_SERVER_FD']) except (LookupError, ValueError): fd = None make_server(hostname, port, application, threaded, processes, request_handler, passthrough_errors, ssl_context, fd=fd).serve_forever() if use_reloader: # If we're not running already in the subprocess that is the # reloader we want to open up a socket early to make sure the # port is actually available. if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': if port == 0 and not can_open_by_fd: raise ValueError('Cannot bind to a random port with enabled ' 'reloader if the Python interpreter does ' 'not support socket opening by fd.') # Create and destroy a socket so that any exceptions are # raised before we spawn a separate Python interpreter and # lose this ability. address_family = select_ip_version(hostname, port) s = socket.socket(address_family, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((hostname, port)) if hasattr(s, 'set_inheritable'): s.set_inheritable(True) # If we can open the socket by file descriptor, then we can just # reuse this one and our socket will survive the restarts. 
if can_open_by_fd: os.environ['WERKZEUG_SERVER_FD'] = str(s.fileno()) s.listen(LISTEN_QUEUE) else: s.close() from ._reloader import run_with_reloader run_with_reloader(inner, extra_files, reloader_interval, reloader_type) else: inner() def run_with_reloader(*args, **kwargs): # People keep using undocumented APIs. Do not use this function # please, we do not guarantee that it continues working. from ._reloader import run_with_reloader return run_with_reloader(*args, **kwargs) def main(): '''A simple command-line interface for :py:func:`run_simple`.''' # in contrast to argparse, this works at least under Python < 2.7 import optparse from werkzeug.utils import import_string parser = optparse.OptionParser( usage='Usage: %prog [options] app_module:app_object') parser.add_option('-b', '--bind', dest='address', help='The hostname:port the app should listen on.') parser.add_option('-d', '--debug', dest='use_debugger', action='store_true', default=False, help='Use Werkzeug\'s debugger.') parser.add_option('-r', '--reload', dest='use_reloader', action='store_true', default=False, help='Reload Python process if modules change.') options, args = parser.parse_args() hostname, port = None, None if options.address: address = options.address.split(':') hostname = address[0] if len(address) > 1: port = address[1] if len(args) != 1: sys.stdout.write('No application supplied, or too much. See --help\n') sys.exit(1) app = import_string(args[0]) run_simple( hostname=(hostname or '127.0.0.1'), port=int(port or 5000), application=app, use_reloader=options.use_reloader, use_debugger=options.use_debugger ) if __name__ == '__main__': main()
"""Train the models """ from __future__ import absolute_import from __future__ import print_function from keras.callbacks import EarlyStopping, History, TensorBoard, ModelCheckpoint from keras.models import load_model import hyperopt from hyperopt.utils import coarse_utcnow from hyperopt.mongoexp import MongoTrials import kopt.eval_metrics as ce from kopt.utils import write_json, merge_dicts, _to_string from kopt.model_data import (subset, split_train_test_idx, split_KFold_idx) from kopt.config import db_host, db_port, save_dir from datetime import datetime, timedelta from uuid import uuid4 from hyperopt import STATUS_OK import numpy as np import pandas as pd from copy import deepcopy import os import glob import pprint import logging logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s') logger = logging.getLogger() logger.setLevel(logging.INFO) def test_fn(fn, hyper_params, n_train=1000, save_model='best', tmp_dir="/tmp/kopt_test/", custom_objects=None): """Test the correctness of the compiled objective function (CompileFN). I will also test model saving/loading from disk. # Arguments fn: CompileFN instance hyper_params: pyll graph of hyper-parameters - as later provided to `hyperopt.fmin` n_train: int, number of training points tmp_dir: Temporary path where to write the trained model. save_model: If not None, the trained model is saved to a temporary directory If save_model="best", save the best model using `keras.callbacks.ModelCheckpoint`, and if save_model="last", save the model after training it. custom_objects: argument passed to load_model - Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. 
""" def wrap_data_fn(data_fn, n_train=100): def new_data_fn(*args, **kwargs): data = data_fn(*args, **kwargs) train = data[0] train = subset(train, idx=np.arange(min(n_train, train[1].shape[0]))) return train, return new_data_fn start_time = datetime.now() fn = deepcopy(fn) hyper_params = deepcopy(hyper_params) fn.save_dir = tmp_dir fn.save_model = save_model fn.data_fn = wrap_data_fn(fn.data_fn, n_train) # sample from hyper_params param = hyperopt.pyll.stochastic.sample(hyper_params) # overwrite the number of epochs if param.get("fit") is None: param["fit"] = {} param["fit"]["epochs"] = 1 # correct execution res = fn(param) print("Returned value:") pprint.pprint(res) assert res["status"] == STATUS_OK if save_model: # correct model loading model_path = max(glob.iglob(fn.save_dir_exp + '/train_models/*.h5'), key=os.path.getctime) assert datetime.fromtimestamp(os.path.getctime(model_path)) > start_time load_model(model_path, custom_objects=custom_objects) class KMongoTrials(MongoTrials): """`hyperopt.MonoTrials` extended with the following methods: - get_trial(tid) - Retrieve trial by tid (Trial ID). - get_param(tid) - Retrieve used hyper-parameters for a trial. - best_trial_tid(rank=0) - Return the trial with lowest loss. - rank - rank=0 means the best model, rank=1 means second best, ... - optimal_epochs(tid) - Number of optimal epochs (after early-stopping) - delete_running(timeout_last_refresh=0, dry_run=False) - Delete jobs stalled in the running state for too long - timeout_last_refresh, int: number of seconds - dry_run, bool: If True, just simulate the removal but don't actually perform it. - valid_tid() - List all valid tid's - train_history(tid=None) - Get train history as pd.DataFrame with columns: `(epoch, loss, val_loss, ...)` - tid: Trial ID or list of trial ID's. If None, report for all trial ID's. - get_ok_results - Return a list of trial results with an "ok" status - load_model(tid) - Load a Keras model of a tid. 
- as_df - Returns a tidy `pandas.DataFrame` of the trials database. # Arguments db_name: str, MongoTrials database name exp_name: strm, MongoTrials experiment name ip: str, MongoDB IP address. port: int, MongoDB port. kill_timeout: int, Maximum runtime of a job (in seconds) before it gets killed. None for infinite. **kwargs: Additional keyword arguments passed to the `hyperopt.MongoTrials` constructor. """ def __init__(self, db_name, exp_name, ip=db_host(), port=db_port(), kill_timeout=None, **kwargs): self.kill_timeout = kill_timeout if self.kill_timeout is not None and self.kill_timeout < 60: logger.warning("kill_timeout < 60 -> Very short time for " + "each job to complete before it gets killed!") super(KMongoTrials, self).__init__( 'mongo://{ip}:{p}/{n}/jobs'.format(ip=ip, p=port, n=db_name), exp_key=exp_name, **kwargs) def get_trial(self, tid): """Retrieve trial by tid """ lid = np.where(np.array(self.tids) == tid)[0][0] return self.trials[lid] def get_param(self, tid): # TODO - return a dictionary - add .to_dict() return self.get_trial(tid)["result"]["param"] def best_trial_tid(self, rank=0): """Get tid of the best trial rank=0 means the best model rank=1 means second best ... 
""" candidates = [t for t in self.trials if t['result']['status'] == STATUS_OK] if len(candidates) == 0: return None losses = [float(t['result']['loss']) for t in candidates] assert not np.any(np.isnan(losses)) lid = np.where(np.argsort(losses).argsort() == rank)[0][0] return candidates[lid]["tid"] def optimal_epochs(self, tid): trial = self.get_trial(tid) patience = trial["result"]["param"]["fit"]["patience"] epochs = trial["result"]["param"]["fit"]["epochs"] def optimal_len(hist): c_epoch = max(hist["loss"]["epoch"]) + 1 if c_epoch == epochs: return epochs else: return c_epoch - patience hist = trial["result"]["history"] if isinstance(hist, list): return int(np.floor(np.array([optimal_len(h) for h in hist]).mean())) else: return optimal_len(hist) # def refresh(self): # """Extends the original object # """ # self.refresh_tids(None) # if self.kill_timeout is not None: # # TODO - remove dry_run # self.delete_running(self.kill_timeout, dry_run=True) def count_by_state_unsynced(self, arg): """Extends the original object in order to inject checking for stalled jobs and killing them if they are running for too long """ if self.kill_timeout is not None: self.delete_running(self.kill_timeout) return super(KMongoTrials, self).count_by_state_unsynced(arg) def delete_running(self, timeout_last_refresh=0, dry_run=False): """Delete jobs stalled in the running state for too long timeout_last_refresh, int: number of seconds """ running_all = self.handle.jobs_running() running_timeout = [job for job in running_all if coarse_utcnow() > job["refresh_time"] + timedelta(seconds=timeout_last_refresh)] if len(running_timeout) == 0: # Nothing to stop self.refresh_tids(None) return None if dry_run: logger.warning("Dry run. Not removing anything.") logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ". 
format(len(running_timeout), len(running_all), len(self))) now = coarse_utcnow() logger.info("Current utc time: {0}".format(now)) logger.info("Time horizont: {0}".format(now - timedelta(seconds=timeout_last_refresh))) for job in running_timeout: logger.info("Removing job: ") pjob = job.to_dict() del pjob["misc"] # ignore misc when printing logger.info(pprint.pformat(pjob)) if not dry_run: self.handle.delete(job) logger.info("Job deleted") self.refresh_tids(None) # def delete_trial(self, tid): # trial = self.get_trial(tid) # return self.handle.delete(trial) def valid_tid(self): """List all valid tid's """ return [t["tid"] for t in self.trials if t["result"]["status"] == "ok"] def train_history(self, tid=None): """Get train history as pd.DataFrame """ def result2history(result): if isinstance(result["history"], list): return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i) for i, hist in enumerate(result["history"])]) else: return pd.DataFrame(result["history"]["loss"]) # use all if tid is None: tid = self.valid_tid() res = [result2history(t["result"]).assign(tid=t["tid"]) for t in self.trials if t["tid"] in _listify(tid)] df = pd.concat(res) # reorder columns fold_name = ["fold"] if "fold" in df else [] df = _put_first(df, ["tid"] + fold_name + ["epoch"]) return df def plot_history(self, tid, scores=["loss", "f1", "accuracy"], figsize=(15, 3)): """Plot the loss curves""" history = self.train_history(tid) import matplotlib.pyplot as plt fig = plt.figure(figsize=figsize) for i, score in enumerate(scores): plt.subplot(1, len(scores), i + 1) plt.tight_layout() plt.plot(history[score], label="train") plt.plot(history['val_' + score], label="validation") plt.title(score) plt.ylabel(score) plt.xlabel('epoch') plt.legend(loc='best') return fig def load_model(self, tid, custom_objects=None): """Load saved keras model of the trial. If tid = None, get the best model Not applicable for trials ran in cross validion (i.e. 
not applicable for `CompileFN.cv_n_folds is None` """ if tid is None: tid = self.best_trial_tid() model_path = self.get_trial(tid)["result"]["path"]["model"] return load_model(model_path, custom_objects=custom_objects) def n_ok(self): """Number of ok trials() """ if len(self.trials) == 0: return 0 else: return np.sum(np.array(self.statuses()) == "ok") def get_ok_results(self, verbose=True): """Return a list of results with ok status """ if len(self.trials) == 0: return [] not_ok = np.where(np.array(self.statuses()) != "ok")[0] if len(not_ok) > 0 and verbose: print("{0}/{1} trials were not ok.".format(len(not_ok), len(self.trials))) print("Trials: " + str(not_ok)) print("Statuses: " + str(np.array(self.statuses())[not_ok])) r = [merge_dicts({"tid": t["tid"]}, t["result"].to_dict()) for t in self.trials if t["result"]["status"] == "ok"] return r def as_df(self, ignore_vals=["history"], separator=".", verbose=True): """Return a pd.DataFrame view of the whole experiment """ def add_eval(res): if "eval" not in res: if isinstance(res["history"], list): # take the average across all folds eval_names = list(res["history"][0]["loss"].keys()) eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()] for hist in res["history"]]).mean(axis=0).tolist() res["eval"] = {eval_names[i]: eval_metrics[i] for i in range(len(eval_metrics))} else: res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()} return res def add_n_epoch(df): df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index() df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True) return pd.merge(df, df_epoch, on="tid", how="left") results = self.get_ok_results(verbose=verbose) rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results] df = pd.DataFrame.from_records(rp) df = add_n_epoch(df) first = ["tid", "loss", "status"] return _put_first(df, first) # -------------------------------------------- # TODO - put to a separate module def 
_train_and_eval_single(train, valid, model, batch_size=32, epochs=300, use_weight=False, callbacks=[], eval_best=False, add_eval_metrics={}, custom_objects=None): """Fit and evaluate a keras model eval_best: if True, load the checkpointed model for evaluation """ def _format_keras_history(history): """nicely format keras history """ return {"params": history.params, "loss": merge_dicts({"epoch": history.epoch}, history.history), } if use_weight: sample_weight = train[2] else: sample_weight = None # train the model logger.info("Fit...") history = History() model.fit(train[0], train[1], batch_size=batch_size, validation_data=valid[:2], epochs=epochs, sample_weight=sample_weight, verbose=2, callbacks=[history] + callbacks) # get history hist = _format_keras_history(history) # load and eval the best model if eval_best: mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)] assert len(mcp) == 1 model = load_model(mcp[0].filepath, custom_objects=custom_objects) return eval_model(model, valid, add_eval_metrics), hist def eval_model(model, test, add_eval_metrics={}): """Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `kopt.eval_metrics` module. 
# Returns dictionary with evaluation metrics """ # evaluate the model logger.info("Evaluate...") # - model_metrics model_metrics_values = model.evaluate(test[0], test[1], verbose=0, batch_size=test[1].shape[0]) # evaluation is done in a single pass to have more precise metics model_metrics = dict(zip(_listify(model.metrics_names), _listify(model_metrics_values))) # - eval_metrics y_true = test[1] y_pred = model.predict(test[0], verbose=0) eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()} # handle the case where the two metrics names intersect # - omit duplicates from eval_metrics intersected_keys = set(model_metrics).intersection(set(eval_metrics)) if len(intersected_keys) > 0: logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones". format(intersected_keys)) eval_metrics = _delete_keys(eval_metrics, intersected_keys) return merge_dicts(model_metrics, eval_metrics) def get_model(model_fn, train_data, param): """Feed model_fn with train_data and param """ model_param = merge_dicts({"train_data": train_data}, param["model"], param.get("shared", {})) return model_fn(**model_param) def get_data(data_fn, param): """Feed data_fn with param """ return data_fn(**merge_dicts(param["data"], param.get("shared", {}))) class CompileFN(): """Compile an objective function that - trains the model on the training set - evaluates the model on the validation set - reports the performance metric on the validation set as the objective loss # Arguments db_name: Database name of the KMongoTrials. exp_name: Experiment name of the KMongoTrials. data_fn: Tuple containing training data as the x,y pair at the first (index=0) element: `((train_x, test_y), ...)`. If `valid_split` and `cv_n_folds` are both `None`, the second (index=1) tuple is used as the validation dataset. add_eval_metrics: Additional list of (global) evaluation metrics. 
Individual elements can be a string (referring to kopt.eval_metrics) or a function taking two numpy arrays: `y_true`, `y_pred`. These metrics are ment to supplement those specified in `model.compile(.., metrics = .)`. optim_metric: str; Metric to optimize. Must be in `add_eval_metrics` or `model.metrics_names`. optim_metric_mode: one of {min, max}. In `min` mode, training will stop when the optimized metric monitored has stopped decreasing; in `max` mode it will stop when the optimized metric monitored has stopped increasing; in `auto` mode, the direction is automatically inferred from the name of the optimized metric. valid_split: Fraction of the training points to use for the validation. If set to None, the second element returned by data_fn is used as the validation dataset. cv_n_folds: If not None, use cross-validation with `cv_n_folds`-folds instead of train, validation split. Overrides `valid_split` and `use_data_fn_valid`. stratified: boolean. If True, use stratified data splitting in train-validation split or cross-validation. random_state: Random seed for performing data-splits. use_tensorboard: If True, tensorboard callback is used. Each trial is written into a separate `log_dir`. save_model: It not None, the trained model is saved to the `save_dir` directory as hdf5 file. If save_model="best", save the best model using `keras.callbacks.ModelCheckpoint`, and if save_model="last", save the model after training it. save_results: If True, the return value is saved as .json to the `save_dir` directory. save_dir: Path to the save directory. custom_objects: argument passed to load_model - Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. 
""" # TODO - check if we can get (db_name, exp_name) from hyperopt def __init__(self, db_name, exp_name, data_fn, model_fn, # validation metric add_eval_metrics=[], optim_metric="loss", # val_loss optim_metric_mode="min", # validation split valid_split=.2, cv_n_folds=None, stratified=False, random_state=None, # saving use_tensorboard=False, save_model="best", save_results=True, save_dir=save_dir(), custom_objects=None, **kwargs ): self.data_fn = data_fn self.model_fn = model_fn assert isinstance(add_eval_metrics, (list, tuple, set, dict)) if isinstance(add_eval_metrics, dict): self.add_eval_metrics = {k: _get_ce_fun(v) for k, v in add_eval_metrics.items()} else: self.add_eval_metrics = {_to_string(fn_str): _get_ce_fun(fn_str) for fn_str in add_eval_metrics} assert isinstance(optim_metric, str) # backcompatibility: # allow only "loss_metric" and "loss_metric_mode" to be passed in kwargs if "loss_metric" in kwargs and optim_metric == "loss": optim_metric = kwargs["loss_metric"] if "loss_metric_mode" in kwargs and optim_metric_mode == "min": optim_metric_mode = kwargs["loss_metric_mode"] possible_kwargs = ["loss_metric", "loss_metric_mode"] add_arguments = set(kwargs.keys()).difference(possible_kwargs) if len(add_arguments) > 0: raise ValueError("Unknown argument(s) {0}. **kwargs accepts only arguments: {1}. ". 
format(add_arguments, possible_kwargs)) self.optim_metric = optim_metric assert optim_metric_mode in ["min", "max"] self.optim_metric_mode = optim_metric_mode self.data_name = data_fn.__name__ self.model_name = model_fn.__name__ self.db_name = db_name self.exp_name = exp_name # validation self.valid_split = valid_split self.cv_n_folds = cv_n_folds self.stratified = stratified self.random_state = random_state # saving self.use_tensorboard = use_tensorboard self.save_dir = save_dir self.save_model = save_model if save_model is not None else "" self.save_results = save_results # loading self.custom_objects = custom_objects # backcompatibility if self.save_model is True: self.save_model = "last" elif self.save_model is False: self.save_model = "" assert self.save_model in ["", "last", "best"] @property def save_dir_exp(self): return self.save_dir + "/{db}/{exp}/".format(db=self.db_name, exp=self.exp_name) def _assert_optim_metric(self, model): model_metrics = _listify(model.metrics_names) eval_metrics = list(self.add_eval_metrics.keys()) if self.optim_metric not in model_metrics + eval_metrics: raise ValueError("optim_metric: '{0}' not in ".format(self.optim_metric) + "either sets of the losses: \n" + "model.metrics_names: {0}\n".format(model_metrics) + "add_eval_metrics: {0}".format(eval_metrics)) def __call__(self, param): time_start = datetime.now() # set default early-stop parameters if param.get("fit") is None: param["fit"] = {} if param["fit"].get("epochs") is None: param["fit"]["epochs"] = 500 # TODO - cleanup callback parameters # - callbacks/early_stop/patience... 
if param["fit"].get("patience") is None: param["fit"]["patience"] = 10 if param["fit"].get("batch_size") is None: param["fit"]["batch_size"] = 32 if param["fit"].get("early_stop_monitor") is None: param["fit"]["early_stop_monitor"] = "val_loss" callbacks = [EarlyStopping(monitor=param["fit"]["early_stop_monitor"], patience=param["fit"]["patience"])] # setup paths for storing the data - TODO check if we can somehow get the id from hyperopt rid = str(uuid4()) tm_dir = self.save_dir_exp + "/train_models/" if not os.path.exists(tm_dir): os.makedirs(tm_dir) model_path = tm_dir + "{0}.h5".format(rid) if self.save_model else "" results_path = tm_dir + "{0}.json".format(rid) if self.save_results else "" if self.use_tensorboard: max_len = 240 - len(rid) - 1 param_string = _dict_to_filestring(_flatten_dict_ignore(param))[:max_len] + ";" + rid tb_dir = self.save_dir_exp + "/tensorboard/" + param_string[:240] callbacks += [TensorBoard(log_dir=tb_dir, histogram_freq=0, # TODO - set to some number afterwards write_graph=False, write_images=True)] # ----------------- # get data logger.info("Load data...") data = get_data(self.data_fn, param) train = data[0] if self.cv_n_folds is None and self.valid_split is None: valid_data = data[1] del data time_data_loaded = datetime.now() # train & evaluate the model if self.cv_n_folds is None: # no cross-validation model = get_model(self.model_fn, train, param) print(_listify(model.metrics_names)) self._assert_optim_metric(model) if self.valid_split is not None: train_idx, valid_idx = split_train_test_idx(train, self.valid_split, self.stratified, self.random_state) train_data = subset(train, train_idx) valid_data = subset(train, valid_idx) else: train_data = train c_callbacks = deepcopy(callbacks) if self.save_model == "best": c_callbacks += [ModelCheckpoint(model_path, monitor=param["fit"]["early_stop_monitor"], save_best_only=True)] eval_metrics, history = _train_and_eval_single(train=train_data, valid=valid_data, model=model, 
epochs=param["fit"]["epochs"], batch_size=param["fit"]["batch_size"], use_weight=param["fit"].get("use_weight", False), callbacks=c_callbacks, eval_best=self.save_model == "best", add_eval_metrics=self.add_eval_metrics, custom_objects=self.custom_objects) if self.save_model == "last": model.save(model_path) else: # cross-validation eval_metrics_list = [] history = [] for i, (train_idx, valid_idx) in enumerate(split_KFold_idx(train, self.cv_n_folds, self.stratified, self.random_state)): logger.info("Fold {0}/{1}".format(i + 1, self.cv_n_folds)) model = get_model(self.model_fn, subset(train, train_idx), param) self._assert_optim_metric(model) c_model_path = model_path.replace(".h5", "_fold_{0}.h5".format(i)) c_callbacks = deepcopy(callbacks) if self.save_model == "best": c_callbacks += [ModelCheckpoint(c_model_path, monitor=param["fit"]["early_stop_monitor"], save_best_only=True)] eval_m, history_elem = _train_and_eval_single(train=subset(train, train_idx), valid=subset(train, valid_idx), model=model, epochs=param["fit"]["epochs"], batch_size=param["fit"]["batch_size"], use_weight=param["fit"].get("use_weight", False), callbacks=c_callbacks, eval_best=self.save_model == "best", add_eval_metrics=self.add_eval_metrics, custom_objects=self.custom_objects) print("\n") eval_metrics_list.append(eval_m) history.append(history_elem) if self.save_model == "last": model.save(c_model_path) # summarize metrics - take average accross folds eval_metrics = _mean_dict(eval_metrics_list) # get loss from eval_metrics loss = eval_metrics[self.optim_metric] if self.optim_metric_mode == "max": loss = - loss # loss should get minimized time_end = datetime.now() ret = {"loss": loss, "status": STATUS_OK, "eval": eval_metrics, # additional info "param": param, "path": { "model": model_path, "results": results_path, }, "name": { "data": self.data_name, "model": self.model_name, "optim_metric": self.optim_metric, "optim_metric_mode": self.optim_metric, }, "history": history, # execution times 
"time": { "start": str(time_start), "end": str(time_end), "duration": { "total": (time_end - time_start).total_seconds(), # in seconds "dataload": (time_data_loaded - time_start).total_seconds(), "training": (time_end - time_data_loaded).total_seconds(), }}} # optionally save information to disk if results_path: write_json(ret, results_path) logger.info("Done!") return ret # Style guide: # ------------- # # path structure: # /s/project/deepcis/hyperopt/db/exp/... # /train_models/ # /best_model.h5 # hyper-params format: # # data: ... (pre-preprocessing parameters) # model: (architecture, etc) # train: (epochs, patience...) # -------------------------------------------- # helper functions def _delete_keys(dct, keys): """Returns a copy of dct without `keys` keys """ c = deepcopy(dct) assert isinstance(keys, list) for k in keys: c.pop(k) return c def _mean_dict(dict_list): """Compute the mean value across a list of dictionaries """ return {k: np.array([d[k] for d in dict_list]).mean() for k in dict_list[0].keys()} def _put_first(df, names): df = df.reindex(columns=names + [c for c in df.columns if c not in names]) return df def _listify(arg): if hasattr(type(arg), '__len__'): return arg return [arg, ] def _get_ce_fun(fn_str): if isinstance(fn_str, str): return ce.get(fn_str) elif callable(fn_str): return fn_str else: raise ValueError("fn_str has to be callable or str") def _flatten_dict(dd, separator='_', prefix=''): return {prefix + separator + k if prefix else k: v for kk, vv in dd.items() for k, v in _flatten_dict(vv, separator, kk).items() } if isinstance(dd, dict) else {prefix: dd} def _flatten_dict_ignore(dd, prefix=''): return {k if prefix else k: v for kk, vv in dd.items() for k, v in _flatten_dict_ignore(vv, kk).items() } if isinstance(dd, dict) else {prefix: dd} def _dict_to_filestring(d): def to_str(v): if isinstance(v, float): return '%s' % float('%.2g' % v) else: return str(v) return ";".join([k + "=" + to_str(v) for k, v in d.items()])
# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the LVM driver module.""" import os import ddt import mock from oslo_config import cfg from manila.common import constants as const from manila import context from manila import exception from manila.share import configuration from manila.share.drivers import lvm from manila import test from manila.tests.db import fakes as db_fakes from manila.tests import fake_utils from manila.tests.share.drivers import test_generic CONF = cfg.CONF def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_snapshot(**kwargs): snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', 'share': { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', }, } snapshot.update(kwargs) return db_fakes.FakeModel(snapshot) def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'access_level': 'rw', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) @ddt.ddt class LVMShareDriverTestCase(test.TestCase): """Tests LVMShareDriver.""" def setUp(self): super(LVMShareDriverTestCase, self).setUp() 
fake_utils.stub_out_utils_execute(self) self._context = context.get_admin_context() CONF.set_default('lvm_share_volume_group', 'fakevg') CONF.set_default('lvm_share_export_ip', '10.0.0.1') CONF.set_default('driver_handles_share_servers', False) CONF.set_default('reserved_share_percentage', 50) self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() self.fake_conf = configuration.Configuration(None) self._db = mock.Mock() self._os = lvm.os = mock.Mock() self._os.path.join = os.path.join self._driver = lvm.LVMShareDriver(self._db, configuration=self.fake_conf) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share() self.access = fake_access() self.snapshot = fake_snapshot() self.server = { 'public_address': self.fake_conf.lvm_share_export_ip, 'instance_id': 'LVM', 'lock_name': 'manila_lvm', } # Used only to test compatibility with share manager self.share_server = "fake_share_server" def tearDown(self): super(LVMShareDriverTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_do_setup(self): CONF.set_default('lvm_share_helpers', ['NFS=fakenfs']) lvm.importutils = mock.Mock() lvm.importutils.import_class.return_value = self._helper_nfs self._driver.do_setup(self._context) lvm.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) def test_check_for_setup_error(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake1\n fakevg\n fake2\n', '' expected_exec = ['vgs --noheadings -o name'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self._driver.check_for_setup_error() self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_check_for_setup_error_no_vg(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake0\n fake1\n fake2\n', '' fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', exec_runner)]) self.assertRaises(exception.InvalidParameterValue, 
self._driver.check_for_setup_error) def test_check_for_setup_error_no_export_ip(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake1\n fakevg\n fake2\n', '' fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', exec_runner)]) CONF.set_default('lvm_share_export_ip', None) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_local_path_normal(self): share = fake_share(name='fake_sharename') CONF.set_default('lvm_share_volume_group', 'fake_vg') ret = self._driver._get_local_path(share) self.assertEqual('/dev/mapper/fake_vg-fake_sharename', ret) def test_local_path_escapes(self): share = fake_share(name='fake-sharename') CONF.set_default('lvm_share_volume_group', 'fake-vg') ret = self._driver._get_local_path(share) self.assertEqual('/dev/mapper/fake--vg-fake--sharename', ret) def test_create_share(self): CONF.set_default('lvm_share_mirrors', 0) self._driver._mount_device = mock.Mock() ret = self._driver.create_share(self._context, self.share, self.share_server) self._driver._mount_device.assert_called_with( self.share, '/dev/mapper/fakevg-fakename') expected_exec = [ 'lvcreate -L 1G -n fakename fakevg', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertEqual(self._helper_nfs.create_exports.return_value, ret) def test_create_share_from_snapshot(self): CONF.set_default('lvm_share_mirrors', 0) self._driver._mount_device = mock.Mock() snapshot_instance = { 'snapshot_id': 'fakesnapshotid', 'name': 'fakename' } mount_share = '/dev/mapper/fakevg-fakename' mount_snapshot = '/dev/mapper/fakevg-fakename' self._helper_nfs.create_export.return_value = 'fakelocation' self._driver.create_share_from_snapshot(self._context, self.share, snapshot_instance, self.share_server) self._driver._mount_device.assert_called_with(self.share, mount_snapshot) expected_exec = [ 'lvcreate -L 1G -n fakename fakevg', 'mkfs.ext4 /dev/mapper/fakevg-fakename', 
'tune2fs -U random %s' % mount_share, ("dd count=0 if=%s of=%s iflag=direct oflag=direct" % (mount_snapshot, mount_share)), ("dd if=%s of=%s count=1024 bs=1M iflag=direct oflag=direct" % (mount_snapshot, mount_share)), ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_create_share_mirrors(self): share = fake_share(size='2048') CONF.set_default('lvm_share_mirrors', 2) self._driver._mount_device = mock.Mock() ret = self._driver.create_share(self._context, share, self.share_server) self._driver._mount_device.assert_called_with( share, '/dev/mapper/fakevg-fakename') expected_exec = [ 'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertEqual(self._helper_nfs.create_exports.return_value, ret) def test_deallocate_container(self): expected_exec = ['lvremove -f fakevg/fakename'] self._driver._deallocate_container(self.share['name']) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_deallocate_container_error(self): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr="error") self.mock_object(self._driver, '_try_execute', _fake_exec) self.assertRaises(exception.ProcessExecutionError, self._driver._deallocate_container, self.share['name']) def test_deallocate_container_not_found_error(self): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr="not found") self.mock_object(self._driver, '_try_execute', _fake_exec) self._driver._deallocate_container(self.share['name']) @mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock()) def test_get_share_stats(self): with mock.patch.object(self._driver, '_stats', mock.Mock) as stats: self.assertEqual(stats, self._driver.get_share_stats()) self.assertFalse(self._driver._update_share_stats.called) @mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock()) def 
test_get_share_stats_refresh(self): with mock.patch.object(self._driver, '_stats', mock.Mock) as stats: self.assertEqual(stats, self._driver.get_share_stats(refresh=True)) self._driver._update_share_stats.assert_called_once_with() def test_remove_export(self): mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self._driver._remove_export(self._context, self.share) expected_exec = [ "umount -f %s" % (mount_path,), ] self._os.path.exists.assert_called_with(mount_path) self._os.rmdir.assert_called_with(mount_path) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_remove_export_is_busy_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='device is busy') self._os.path.exists.return_value = True mount_path = self._get_mount_path(self.share) expected_exec = [ "umount -f %s" % (mount_path), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ShareBusyException, self._driver._remove_export, self._context, self.share) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_remove_export_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='fake error') mount_path = self._get_mount_path(self.share) expected_exec = [ "umount -f %s" % (mount_path), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self._os.path.exists.return_value = True self._driver._remove_export(self._context, self.share) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_remove_export_rmdir_error(self): mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self.mock_object(self._os, 'rmdir', mock.Mock(side_effect=OSError)) self._driver._remove_export(self._context, self.share) expected_exec = [ "umount -f %s" % (mount_path,), ] self._os.path.exists.assert_called_with(mount_path) 
self._os.rmdir.assert_called_with(mount_path) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_create_snapshot(self): self._driver.create_snapshot(self._context, self.snapshot, self.share_server) mount_path = self._get_mount_path(self.snapshot) expected_exec = [ ("lvcreate -L 1G --name fakesnapshotname --snapshot " "%s/fakename" % (CONF.lvm_share_volume_group,)), "tune2fs -U random /dev/mapper/fakevg-%s" % self.snapshot['name'], "mkdir -p " + mount_path, "mount /dev/mapper/fakevg-fakesnapshotname " + mount_path, "chmod 777 " + mount_path, ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_ensure_share(self): device_name = '/dev/mapper/fakevg-fakename' with mock.patch.object(self._driver, '_mount_device', mock.Mock(return_value='fake_location')): self._driver.ensure_share(self._context, self.share, self.share_server) self._driver._mount_device.assert_called_with(self.share, device_name) self._helper_nfs.create_exports.assert_called_once_with( self.server, self.share['name'], recreate=True) def test_delete_share(self): mount_path = self._get_mount_path(self.share) self._helper_nfs.remove_export(mount_path, self.share['name']) self._driver._delete_share(self._context, self.share) def test_delete_snapshot(self): expected_exec = [ 'umount -f ' + self._get_mount_path(self.snapshot), 'lvremove -f fakevg/fakesnapshotname', ] self._driver.delete_snapshot(self._context, self.snapshot, self.share_server) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_delete_share_invalid_share(self): self._driver._get_helper = mock.Mock( side_effect=exception.InvalidShare(reason='fake')) self._driver.delete_share(self._context, self.share, self.share_server) def test_delete_share_process_execution_error(self): self.mock_object( self._helper_nfs, 'remove_export', mock.Mock(side_effect=exception.ProcessExecutionError)) self._driver._delete_share(self._context, self.share) 
self._helper_nfs.remove_exports.assert_called_once_with( self.server, self.share['name']) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access(self, access_level): access_rules = [test_generic.get_fake_access_rule( '1.1.1.1', access_level), ] add_rules = [test_generic.get_fake_access_rule( '2.2.2.2', access_level), ] delete_rules = [test_generic.get_fake_access_rule( '3.3.3.3', access_level), ] self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, share_server=self.server) (self._driver._helpers[self.share['share_proto']]. update_access.assert_called_once_with( self.server, self.share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules)) def test_mount_device(self): mount_path = self._get_mount_path(self.share) ret = self._driver._mount_device(self.share, 'fakedevice') expected_exec = [ "mkdir -p %s" % (mount_path,), "mount fakedevice %s" % (mount_path,), "chmod 777 %s" % (mount_path,), ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertEqual(mount_path, ret) def test_mount_device_already(self): def exec_runner(*args, **kwargs): if 'mount' in args and '-l' not in args: raise exception.ProcessExecutionError() else: return 'fakedevice', '' self.mock_object(self._driver, '_execute', exec_runner) mount_path = self._get_mount_path(self.share) ret = self._driver._mount_device(self.share, 'fakedevice') self.assertEqual(mount_path, ret) def test_mount_device_error(self): def exec_runner(*args, **kwargs): if 'mount' in args and '-l' not in args: raise exception.ProcessExecutionError() else: return 'fake', '' self.mock_object(self._driver, '_execute', exec_runner) self.assertRaises(exception.ProcessExecutionError, self._driver._mount_device, self.share, 'fakedevice') def test_get_helper(self): share_cifs = fake_share(share_proto='CIFS') share_nfs = fake_share(share_proto='NFS') share_fake = fake_share(share_proto='FAKE') 
        # NOTE(review): this chunk starts mid-method. The three assertions
        # below are the tail of a helper-lookup test whose ``def`` line lies
        # before this view: they check that _get_helper maps each share
        # protocol to the matching helper and rejects unknown protocols.
        self.assertEqual(self._driver._get_helper(share_cifs),
                         self._helper_cifs)
        self.assertEqual(self._driver._get_helper(share_nfs),
                         self._helper_nfs)
        self.assertRaises(exception.InvalidShare, self._driver._get_helper,
                          share_fake)

    def _get_mount_path(self, share):
        """Mirror the driver's mount-path layout: <export_root>/<share name>."""
        return os.path.join(CONF.lvm_share_export_root, share['name'])

    def test_unmount_device(self):
        """_unmount_device must unmount the share and remove its mount dir."""
        mount_path = self._get_mount_path(self.share)
        self.mock_object(self._driver, '_execute')
        self._driver._unmount_device(self.share)
        # Both commands run as root: first the umount, then the rmdir cleanup.
        self._driver._execute.assert_any_call('umount', mount_path,
                                              run_as_root=True)
        self._driver._execute.assert_any_call('rmdir', mount_path,
                                              run_as_root=True)

    def test_extend_share(self):
        """extend_share grows the LV then resizes the ext filesystem on it."""
        local_path = self._driver._get_local_path(self.share)
        self.mock_object(self._driver, '_extend_container')
        self.mock_object(self._driver, '_execute')
        self._driver.extend_share(self.share, 3)
        self._driver._extend_container.assert_called_once_with(self.share,
                                                               local_path, 3)
        self._driver._execute.assert_called_once_with('resize2fs', local_path,
                                                      run_as_root=True)

    def test_ssh_exec_as_root(self):
        """Without a leading 'sudo' the command runs unprivileged."""
        command = ['fake_command']
        self.mock_object(self._driver, '_execute')
        self._driver._ssh_exec_as_root('fake_server', command)
        self._driver._execute.assert_called_once_with('fake_command',
                                                      check_exit_code=True)

    def test_ssh_exec_as_root_with_sudo(self):
        """A leading 'sudo' is stripped and converted to run_as_root=True."""
        command = ['sudo', 'fake_command']
        self.mock_object(self._driver, '_execute')
        self._driver._ssh_exec_as_root('fake_server', command)
        self._driver._execute.assert_called_once_with(
            'fake_command', run_as_root=True, check_exit_code=True)

    def test_extend_container(self):
        """_extend_container issues a single lvextend to the requested size."""
        self.mock_object(self._driver, '_try_execute')
        self._driver._extend_container(self.share, 'device_name', 3)
        self._driver._try_execute.assert_called_once_with(
            'lvextend',
            '-L',
            '3G',
            '-n',
            'device_name',
            run_as_root=True)

    def test_get_share_server_pools(self):
        """Pool stats are parsed out of the `vgs` VSize/VFree columns."""
        expected_result = [{
            'pool_name': 'lvm-single-pool',
            'total_capacity_gb': 33,
            'free_capacity_gb': 22,
            'reserved_percentage': 0,
        }, ]
        self.mock_object(
            self._driver, '_execute',
            mock.Mock(return_value=("VSize 33g VFree 22g", None)))
        self.assertEqual(expected_result,
                         self._driver.get_share_server_pools())
        self._driver._execute.assert_called_once_with(
            'vgs', 'fakevg', '--rows', '--units', 'g', run_as_root=True)

    def test_copy_volume_error(self):
        """A failing O_DIRECT probe ('count=0' dd) falls back to buffered dd."""
        def _fake_exec(*args, **kwargs):
            # Only the zero-length direct-I/O capability probe fails.
            if 'count=0' in args:
                raise exception.ProcessExecutionError()

        self.mock_object(self._driver, '_execute', mock.Mock(
            side_effect=_fake_exec))
        self._driver._copy_volume('src', 'dest', 1)
        self._driver._execute.assert_any_call('dd', 'count=0', 'if=src',
                                              'of=dest', 'iflag=direct',
                                              'oflag=direct', run_as_root=True)
        # Fallback copy: 1 GiB expressed as 1024 blocks of 1M, no direct I/O.
        self._driver._execute.assert_any_call('dd', 'if=src', 'of=dest',
                                              'count=1024', 'bs=1M',
                                              run_as_root=True)

    def test_update_share_stats(self):
        """_update_share_stats publishes backend capabilities and pool info."""
        self.mock_object(self._driver, 'get_share_server_pools',
                         mock.Mock(return_value='test-pool'))
        self._driver._update_share_stats()
        self.assertEqual('LVM', self._driver._stats['share_backend_name'])
        self.assertEqual('NFS_CIFS', self._driver._stats['storage_protocol'])
        self.assertEqual(50, self._driver._stats['reserved_percentage'])
        self.assertTrue(self._driver._stats['snapshot_support'])
        self.assertEqual('LVMShareDriver', self._driver._stats['driver_name'])
        self.assertEqual('test-pool', self._driver._stats['pools'])

    def test_revert_to_snapshot(self):
        """Revert merges the snapshot LV back and rebuilds both mounts.

        The exact shell command sequence (captured by fake_utils) is the
        contract: unmount snapshot, lvconvert --merge, cycle the share LV,
        recreate the snapshot LV, re-mount and re-chmod both paths.
        """
        mock_update_access = self.mock_object(self._helper_nfs,
                                              'update_access')
        self._driver.revert_to_snapshot(self._context, self.snapshot,
                                        [], self.share_server)
        snap_lv = "%s/fakesnapshotname" % (CONF.lvm_share_volume_group)
        share_lv = "%s/fakename" % (CONF.lvm_share_volume_group)
        share_mount_path = self._get_mount_path(self.snapshot['share'])
        snapshot_mount_path = self._get_mount_path(self.snapshot)
        expected_exec = [
            ('umount -f %s' % snapshot_mount_path),
            ("lvconvert --merge %s" % snap_lv),
            ("umount %s" % share_mount_path),
            ("rmdir %s" % share_mount_path),
            # Deactivate/reactivate the share LV so the merge takes effect.
            ("lvchange -an %s" % share_lv),
            ("lvchange -ay %s" % share_lv),
            ("lvcreate -L 1G --name fakesnapshotname --snapshot %s" %
             share_lv),
            # Fresh UUID so the recreated snapshot FS can be mounted alongside
            # its origin.
            ('tune2fs -U random /dev/mapper/%s-fakesnapshotname' %
             CONF.lvm_share_volume_group),
            ("mkdir -p %s" % share_mount_path),
            ("mount /dev/mapper/%s-fakename %s" %
             (CONF.lvm_share_volume_group, share_mount_path)),
            ("chmod 777 %s" % share_mount_path),
            ("mkdir -p %s" % snapshot_mount_path),
            ("mount /dev/mapper/fakevg-fakesnapshotname "
             "%s" % snapshot_mount_path),
            ("chmod 777 %s" % snapshot_mount_path),
        ]
        self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
        # Access rules are re-applied to both the share and the snapshot.
        self.assertEqual(2, mock_update_access.call_count)

    def test_snapshot_update_access(self):
        """snapshot_update_access delegates to the protocol helper verbatim."""
        access_rules = [{
            'access_type': 'ip',
            'access_to': '1.1.1.1',
            'access_level': 'ro',
        }]

        add_rules = [{
            'access_type': 'ip',
            'access_to': '2.2.2.2',
            'access_level': 'ro',
        }]

        delete_rules = [{
            'access_type': 'ip',
            'access_to': '3.3.3.3',
            'access_level': 'ro',
        }]

        self._driver.snapshot_update_access(self._context, self.snapshot,
                                            access_rules, add_rules,
                                            delete_rules)

        (self._driver._helpers[self.snapshot['share']['share_proto']].
            update_access.assert_called_once_with(
                self.server, self.snapshot['name'], access_rules,
                add_rules=add_rules, delete_rules=delete_rules))
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: Auto-generated proto-plus message wrappers for the Google Ads API v8
# "common criteria" protos. Field ``number=`` values are protobuf wire tags —
# do not hand-edit them or the manifest below.
import proto  # type: ignore

from google.ads.googleads.v8.enums.types import age_range_type
from google.ads.googleads.v8.enums.types import app_payment_model_type
from google.ads.googleads.v8.enums.types import content_label_type
from google.ads.googleads.v8.enums.types import day_of_week as gage_day_of_week
from google.ads.googleads.v8.enums.types import device
from google.ads.googleads.v8.enums.types import gender_type
from google.ads.googleads.v8.enums.types import hotel_date_selection_type
from google.ads.googleads.v8.enums.types import income_range_type
from google.ads.googleads.v8.enums.types import interaction_type
from google.ads.googleads.v8.enums.types import keyword_match_type
from google.ads.googleads.v8.enums.types import listing_group_type
from google.ads.googleads.v8.enums.types import location_group_radius_units
from google.ads.googleads.v8.enums.types import minute_of_hour
from google.ads.googleads.v8.enums.types import parental_status_type
from google.ads.googleads.v8.enums.types import preferred_content_type
from google.ads.googleads.v8.enums.types import product_bidding_category_level
from google.ads.googleads.v8.enums.types import (
    product_channel as gage_product_channel,
)
from google.ads.googleads.v8.enums.types import (
    product_channel_exclusivity as gage_product_channel_exclusivity,
)
from google.ads.googleads.v8.enums.types import (
    product_condition as gage_product_condition,
)
from google.ads.googleads.v8.enums.types import product_custom_attribute_index
from google.ads.googleads.v8.enums.types import product_type_level
from google.ads.googleads.v8.enums.types import proximity_radius_units
from google.ads.googleads.v8.enums.types import webpage_condition_operand
from google.ads.googleads.v8.enums.types import webpage_condition_operator


# Registers every message below with the proto-plus marshal for this package;
# the manifest order/contents must match the generated proto definitions.
__protobuf__ = proto.module(
    package="google.ads.googleads.v8.common",
    marshal="google.ads.googleads.v8",
    manifest={
        "KeywordInfo",
        "PlacementInfo",
        "MobileAppCategoryInfo",
        "MobileApplicationInfo",
        "LocationInfo",
        "DeviceInfo",
        "PreferredContentInfo",
        "ListingGroupInfo",
        "ListingScopeInfo",
        "ListingDimensionInfo",
        "HotelIdInfo",
        "HotelClassInfo",
        "HotelCountryRegionInfo",
        "HotelStateInfo",
        "HotelCityInfo",
        "ProductBiddingCategoryInfo",
        "ProductBrandInfo",
        "ProductChannelInfo",
        "ProductChannelExclusivityInfo",
        "ProductConditionInfo",
        "ProductCustomAttributeInfo",
        "ProductItemIdInfo",
        "ProductTypeInfo",
        "UnknownListingDimensionInfo",
        "HotelDateSelectionTypeInfo",
        "HotelAdvanceBookingWindowInfo",
        "HotelLengthOfStayInfo",
        "HotelCheckInDateRangeInfo",
        "HotelCheckInDayInfo",
        "InteractionTypeInfo",
        "AdScheduleInfo",
        "AgeRangeInfo",
        "GenderInfo",
        "IncomeRangeInfo",
        "ParentalStatusInfo",
        "YouTubeVideoInfo",
        "YouTubeChannelInfo",
        "UserListInfo",
        "ProximityInfo",
        "GeoPointInfo",
        "AddressInfo",
        "TopicInfo",
        "LanguageInfo",
        "IpBlockInfo",
        "ContentLabelInfo",
        "CarrierInfo",
        "UserInterestInfo",
        "WebpageInfo",
        "WebpageConditionInfo",
        "WebpageSampleInfo",
        "OperatingSystemVersionInfo",
        "AppPaymentModelInfo",
        "MobileDeviceInfo",
        "CustomAffinityInfo",
        "CustomIntentInfo",
        "LocationGroupInfo",
        "CustomAudienceInfo",
        "CombinedAudienceInfo",
        "KeywordThemeInfo",
    },
)


class KeywordInfo(proto.Message):
    r"""A keyword criterion.
    Attributes:
        text (str):
            The text of the keyword (at most 80
            characters and 10 words).
        match_type (google.ads.googleads.v8.enums.types.KeywordMatchTypeEnum.KeywordMatchType):
            The match type of the keyword.
    """

    text = proto.Field(proto.STRING, number=3, optional=True,)
    match_type = proto.Field(
        proto.ENUM,
        number=2,
        enum=keyword_match_type.KeywordMatchTypeEnum.KeywordMatchType,
    )


class PlacementInfo(proto.Message):
    r"""A placement criterion. This can be used to modify bids for
    sites when targeting the content network.

    Attributes:
        url (str):
            URL of the placement.
            For example, "http://www.domain.com".
    """

    url = proto.Field(proto.STRING, number=2, optional=True,)


class MobileAppCategoryInfo(proto.Message):
    r"""A mobile app category criterion.
    Attributes:
        mobile_app_category_constant (str):
            The mobile app category constant resource
            name.
    """

    mobile_app_category_constant = proto.Field(
        proto.STRING, number=2, optional=True,
    )


class MobileApplicationInfo(proto.Message):
    r"""A mobile application criterion.
    Attributes:
        app_id (str):
            A string that uniquely identifies a mobile application to
            Google Ads API. The format of this string is
            "{platform}-{platform_native_id}", where platform is "1" for
            iOS apps and "2" for Android apps, and where
            platform_native_id is the mobile application identifier
            native to the corresponding platform. For iOS, this native
            identifier is the 9 digit string that appears at the end of
            an App Store URL (e.g., "476943146" for "Flood-It! 2" whose
            App Store link is
            "http://itunes.apple.com/us/app/flood-it!-2/id476943146").
            For Android, this native identifier is the application's
            package name (e.g., "com.labpixies.colordrips" for "Color
            Drips" given Google Play link
            "https://play.google.com/store/apps/details?id=com.labpixies.colordrips").
            A well formed app id for Google Ads API would thus be
            "1-476943146" for iOS and "2-com.labpixies.colordrips" for
            Android. This field is required and must be set in CREATE
            operations.
        name (str):
            Name of this mobile application.
    """

    app_id = proto.Field(proto.STRING, number=4, optional=True,)
    name = proto.Field(proto.STRING, number=5, optional=True,)


class LocationInfo(proto.Message):
    r"""A location criterion.
    Attributes:
        geo_target_constant (str):
            The geo target constant resource name.
    """

    geo_target_constant = proto.Field(proto.STRING, number=2, optional=True,)


class DeviceInfo(proto.Message):
    r"""A device criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.DeviceEnum.Device):
            Type of the device.
    """

    type_ = proto.Field(proto.ENUM, number=1, enum=device.DeviceEnum.Device,)


class PreferredContentInfo(proto.Message):
    r"""A preferred content criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.PreferredContentTypeEnum.PreferredContentType):
            Type of the preferred content.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=2,
        enum=preferred_content_type.PreferredContentTypeEnum.PreferredContentType,
    )


class ListingGroupInfo(proto.Message):
    r"""A listing group criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.ListingGroupTypeEnum.ListingGroupType):
            Type of the listing group.
        case_value (google.ads.googleads.v8.common.types.ListingDimensionInfo):
            Dimension value with which this listing group
            is refining its parent. Undefined for the root
            group.
        parent_ad_group_criterion (str):
            Resource name of ad group criterion which is
            the parent listing group subdivision. Null for
            the root group.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=listing_group_type.ListingGroupTypeEnum.ListingGroupType,
    )
    case_value = proto.Field(
        proto.MESSAGE, number=2, message="ListingDimensionInfo",
    )
    parent_ad_group_criterion = proto.Field(
        proto.STRING, number=4, optional=True,
    )


class ListingScopeInfo(proto.Message):
    r"""A listing scope criterion.
    Attributes:
        dimensions (Sequence[google.ads.googleads.v8.common.types.ListingDimensionInfo]):
            Scope of the campaign criterion.
    """

    dimensions = proto.RepeatedField(
        proto.MESSAGE, number=2, message="ListingDimensionInfo",
    )


class ListingDimensionInfo(proto.Message):
    r"""Listing dimensions for listing group criterion.
    Attributes:
        hotel_id (google.ads.googleads.v8.common.types.HotelIdInfo):
            Advertiser-specific hotel ID.
        hotel_class (google.ads.googleads.v8.common.types.HotelClassInfo):
            Class of the hotel as a number of stars 1 to
            5.
        hotel_country_region (google.ads.googleads.v8.common.types.HotelCountryRegionInfo):
            Country or Region the hotel is located in.
        hotel_state (google.ads.googleads.v8.common.types.HotelStateInfo):
            State the hotel is located in.
        hotel_city (google.ads.googleads.v8.common.types.HotelCityInfo):
            City the hotel is located in.
        product_bidding_category (google.ads.googleads.v8.common.types.ProductBiddingCategoryInfo):
            Bidding category of a product offer.
        product_brand (google.ads.googleads.v8.common.types.ProductBrandInfo):
            Brand of a product offer.
        product_channel (google.ads.googleads.v8.common.types.ProductChannelInfo):
            Locality of a product offer.
        product_channel_exclusivity (google.ads.googleads.v8.common.types.ProductChannelExclusivityInfo):
            Availability of a product offer.
        product_condition (google.ads.googleads.v8.common.types.ProductConditionInfo):
            Condition of a product offer.
        product_custom_attribute (google.ads.googleads.v8.common.types.ProductCustomAttributeInfo):
            Custom attribute of a product offer.
        product_item_id (google.ads.googleads.v8.common.types.ProductItemIdInfo):
            Item id of a product offer.
        product_type (google.ads.googleads.v8.common.types.ProductTypeInfo):
            Type of a product offer.
        unknown_listing_dimension (google.ads.googleads.v8.common.types.UnknownListingDimensionInfo):
            Unknown dimension. Set when no other listing
            dimension is set.
    """

    # All fields below share the "dimension" oneof: at most one is set.
    hotel_id = proto.Field(
        proto.MESSAGE, number=2, oneof="dimension", message="HotelIdInfo",
    )
    hotel_class = proto.Field(
        proto.MESSAGE, number=3, oneof="dimension", message="HotelClassInfo",
    )
    hotel_country_region = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="dimension",
        message="HotelCountryRegionInfo",
    )
    hotel_state = proto.Field(
        proto.MESSAGE, number=5, oneof="dimension", message="HotelStateInfo",
    )
    hotel_city = proto.Field(
        proto.MESSAGE, number=6, oneof="dimension", message="HotelCityInfo",
    )
    product_bidding_category = proto.Field(
        proto.MESSAGE,
        number=13,
        oneof="dimension",
        message="ProductBiddingCategoryInfo",
    )
    product_brand = proto.Field(
        proto.MESSAGE, number=15, oneof="dimension", message="ProductBrandInfo",
    )
    product_channel = proto.Field(
        proto.MESSAGE,
        number=8,
        oneof="dimension",
        message="ProductChannelInfo",
    )
    product_channel_exclusivity = proto.Field(
        proto.MESSAGE,
        number=9,
        oneof="dimension",
        message="ProductChannelExclusivityInfo",
    )
    product_condition = proto.Field(
        proto.MESSAGE,
        number=10,
        oneof="dimension",
        message="ProductConditionInfo",
    )
    product_custom_attribute = proto.Field(
        proto.MESSAGE,
        number=16,
        oneof="dimension",
        message="ProductCustomAttributeInfo",
    )
    product_item_id = proto.Field(
        proto.MESSAGE,
        number=11,
        oneof="dimension",
        message="ProductItemIdInfo",
    )
    product_type = proto.Field(
        proto.MESSAGE, number=12, oneof="dimension", message="ProductTypeInfo",
    )
    unknown_listing_dimension = proto.Field(
        proto.MESSAGE,
        number=14,
        oneof="dimension",
        message="UnknownListingDimensionInfo",
    )


class HotelIdInfo(proto.Message):
    r"""Advertiser-specific hotel ID.
    Attributes:
        value (str):
            String value of the hotel ID.
    """

    value = proto.Field(proto.STRING, number=2, optional=True,)


class HotelClassInfo(proto.Message):
    r"""Class of the hotel as a number of stars 1 to 5.
    Attributes:
        value (int):
            Long value of the hotel class.
    """

    value = proto.Field(proto.INT64, number=2, optional=True,)


class HotelCountryRegionInfo(proto.Message):
    r"""Country or Region the hotel is located in.
    Attributes:
        country_region_criterion (str):
            The Geo Target Constant resource name.
    """

    country_region_criterion = proto.Field(
        proto.STRING, number=2, optional=True,
    )


class HotelStateInfo(proto.Message):
    r"""State the hotel is located in.
    Attributes:
        state_criterion (str):
            The Geo Target Constant resource name.
    """

    state_criterion = proto.Field(proto.STRING, number=2, optional=True,)


class HotelCityInfo(proto.Message):
    r"""City the hotel is located in.
    Attributes:
        city_criterion (str):
            The Geo Target Constant resource name.
    """

    city_criterion = proto.Field(proto.STRING, number=2, optional=True,)


class ProductBiddingCategoryInfo(proto.Message):
    r"""Bidding category of a product offer.
    Attributes:
        id (int):
            ID of the product bidding category.
            This ID is equivalent to the
            google_product_category ID as described in this
            article: https://support.google.com/merchants/answer/6324436
        country_code (str):
            Two-letter upper-case country code of the product bidding
            category. It must match the
            campaign.shopping_setting.sales_country field.
        level (google.ads.googleads.v8.enums.types.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel):
            Level of the product bidding category.
    """

    id = proto.Field(proto.INT64, number=4, optional=True,)
    country_code = proto.Field(proto.STRING, number=5, optional=True,)
    level = proto.Field(
        proto.ENUM,
        number=3,
        enum=product_bidding_category_level.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel,
    )


class ProductBrandInfo(proto.Message):
    r"""Brand of the product.
    Attributes:
        value (str):
            String value of the product brand.
    """

    value = proto.Field(proto.STRING, number=2, optional=True,)


class ProductChannelInfo(proto.Message):
    r"""Locality of a product offer.
    Attributes:
        channel (google.ads.googleads.v8.enums.types.ProductChannelEnum.ProductChannel):
            Value of the locality.
    """

    channel = proto.Field(
        proto.ENUM,
        number=1,
        enum=gage_product_channel.ProductChannelEnum.ProductChannel,
    )


class ProductChannelExclusivityInfo(proto.Message):
    r"""Availability of a product offer.
    Attributes:
        channel_exclusivity (google.ads.googleads.v8.enums.types.ProductChannelExclusivityEnum.ProductChannelExclusivity):
            Value of the availability.
    """

    channel_exclusivity = proto.Field(
        proto.ENUM,
        number=1,
        enum=gage_product_channel_exclusivity.ProductChannelExclusivityEnum.ProductChannelExclusivity,
    )


class ProductConditionInfo(proto.Message):
    r"""Condition of a product offer.
    Attributes:
        condition (google.ads.googleads.v8.enums.types.ProductConditionEnum.ProductCondition):
            Value of the condition.
    """

    condition = proto.Field(
        proto.ENUM,
        number=1,
        enum=gage_product_condition.ProductConditionEnum.ProductCondition,
    )


class ProductCustomAttributeInfo(proto.Message):
    r"""Custom attribute of a product offer.
    Attributes:
        value (str):
            String value of the product custom
            attribute.
        index (google.ads.googleads.v8.enums.types.ProductCustomAttributeIndexEnum.ProductCustomAttributeIndex):
            Indicates the index of the custom attribute.
    """

    value = proto.Field(proto.STRING, number=3, optional=True,)
    index = proto.Field(
        proto.ENUM,
        number=2,
        enum=product_custom_attribute_index.ProductCustomAttributeIndexEnum.ProductCustomAttributeIndex,
    )


class ProductItemIdInfo(proto.Message):
    r"""Item id of a product offer.
    Attributes:
        value (str):
            Value of the id.
    """

    value = proto.Field(proto.STRING, number=2, optional=True,)


class ProductTypeInfo(proto.Message):
    r"""Type of a product offer.
    Attributes:
        value (str):
            Value of the type.
        level (google.ads.googleads.v8.enums.types.ProductTypeLevelEnum.ProductTypeLevel):
            Level of the type.
    """

    value = proto.Field(proto.STRING, number=3, optional=True,)
    level = proto.Field(
        proto.ENUM,
        number=2,
        enum=product_type_level.ProductTypeLevelEnum.ProductTypeLevel,
    )


class UnknownListingDimensionInfo(proto.Message):
    r"""Unknown listing dimension.    """
    # Intentionally empty: acts as a marker message in the "dimension" oneof.


class HotelDateSelectionTypeInfo(proto.Message):
    r"""Criterion for hotel date selection (default dates vs. user
    selected).

    Attributes:
        type_ (google.ads.googleads.v8.enums.types.HotelDateSelectionTypeEnum.HotelDateSelectionType):
            Type of the hotel date selection
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=hotel_date_selection_type.HotelDateSelectionTypeEnum.HotelDateSelectionType,
    )


class HotelAdvanceBookingWindowInfo(proto.Message):
    r"""Criterion for number of days prior to the stay the booking is
    being made.

    Attributes:
        min_days (int):
            Low end of the number of days prior to the
            stay.
        max_days (int):
            High end of the number of days prior to the
            stay.
    """

    min_days = proto.Field(proto.INT64, number=3, optional=True,)
    max_days = proto.Field(proto.INT64, number=4, optional=True,)


class HotelLengthOfStayInfo(proto.Message):
    r"""Criterion for length of hotel stay in nights.
    Attributes:
        min_nights (int):
            Low end of the number of nights in the stay.
        max_nights (int):
            High end of the number of nights in the stay.
    """

    min_nights = proto.Field(proto.INT64, number=3, optional=True,)
    max_nights = proto.Field(proto.INT64, number=4, optional=True,)


class HotelCheckInDateRangeInfo(proto.Message):
    r"""Criterion for a check-in date range.
    Attributes:
        start_date (str):
            Start date in the YYYY-MM-DD format.
        end_date (str):
            End date in the YYYY-MM-DD format.
    """

    start_date = proto.Field(proto.STRING, number=1,)
    end_date = proto.Field(proto.STRING, number=2,)


class HotelCheckInDayInfo(proto.Message):
    r"""Criterion for day of the week the booking is for.
    Attributes:
        day_of_week (google.ads.googleads.v8.enums.types.DayOfWeekEnum.DayOfWeek):
            The day of the week.
    """

    day_of_week = proto.Field(
        proto.ENUM, number=1, enum=gage_day_of_week.DayOfWeekEnum.DayOfWeek,
    )


class InteractionTypeInfo(proto.Message):
    r"""Criterion for Interaction Type.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.InteractionTypeEnum.InteractionType):
            The interaction type.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=interaction_type.InteractionTypeEnum.InteractionType,
    )


class AdScheduleInfo(proto.Message):
    r"""Represents an AdSchedule criterion.
    AdSchedule is specified as the day of the week and a time
    interval within which ads will be shown.

    No more than six AdSchedules can be added for the same day.

    Attributes:
        start_minute (google.ads.googleads.v8.enums.types.MinuteOfHourEnum.MinuteOfHour):
            Minutes after the start hour at which this
            schedule starts.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
        end_minute (google.ads.googleads.v8.enums.types.MinuteOfHourEnum.MinuteOfHour):
            Minutes after the end hour at which this
            schedule ends. The schedule is exclusive of the
            end minute.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
        start_hour (int):
            Starting hour in 24 hour time.
            This field must be between 0 and 23, inclusive.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
        end_hour (int):
            Ending hour in 24 hour time; 24 signifies end
            of the day. This field must be between 0 and 24,
            inclusive.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
        day_of_week (google.ads.googleads.v8.enums.types.DayOfWeekEnum.DayOfWeek):
            Day of the week the schedule applies to.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
    """

    start_minute = proto.Field(
        proto.ENUM,
        number=1,
        enum=minute_of_hour.MinuteOfHourEnum.MinuteOfHour,
    )
    end_minute = proto.Field(
        proto.ENUM,
        number=2,
        enum=minute_of_hour.MinuteOfHourEnum.MinuteOfHour,
    )
    start_hour = proto.Field(proto.INT32, number=6, optional=True,)
    end_hour = proto.Field(proto.INT32, number=7, optional=True,)
    day_of_week = proto.Field(
        proto.ENUM, number=5, enum=gage_day_of_week.DayOfWeekEnum.DayOfWeek,
    )


class AgeRangeInfo(proto.Message):
    r"""An age range criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.AgeRangeTypeEnum.AgeRangeType):
            Type of the age range.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=age_range_type.AgeRangeTypeEnum.AgeRangeType,
    )


class GenderInfo(proto.Message):
    r"""A gender criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.GenderTypeEnum.GenderType):
            Type of the gender.
    """

    type_ = proto.Field(
        proto.ENUM, number=1, enum=gender_type.GenderTypeEnum.GenderType,
    )


class IncomeRangeInfo(proto.Message):
    r"""An income range criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.IncomeRangeTypeEnum.IncomeRangeType):
            Type of the income range.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=income_range_type.IncomeRangeTypeEnum.IncomeRangeType,
    )


class ParentalStatusInfo(proto.Message):
    r"""A parental status criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.ParentalStatusTypeEnum.ParentalStatusType):
            Type of the parental status.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=parental_status_type.ParentalStatusTypeEnum.ParentalStatusType,
    )


class YouTubeVideoInfo(proto.Message):
    r"""A YouTube Video criterion.
    Attributes:
        video_id (str):
            YouTube video id as it appears on the YouTube
            watch page.
    """

    video_id = proto.Field(proto.STRING, number=2, optional=True,)


class YouTubeChannelInfo(proto.Message):
    r"""A YouTube Channel criterion.
    Attributes:
        channel_id (str):
            The YouTube uploader channel id or the
            channel code of a YouTube channel.
    """

    channel_id = proto.Field(proto.STRING, number=2, optional=True,)


class UserListInfo(proto.Message):
    r"""A User List criterion. Represents a user list that is defined
    by the advertiser to be targeted.

    Attributes:
        user_list (str):
            The User List resource name.
    """

    user_list = proto.Field(proto.STRING, number=2, optional=True,)


class ProximityInfo(proto.Message):
    r"""A Proximity criterion. The geo point and radius determine
    what geographical area is included. The address is a description
    of the geo point that does not affect ad serving.

    There are two ways to create a proximity. First, by setting an
    address and radius. The geo point will be automatically
    computed. Second, by setting a geo point and radius. The address
    is an optional label that won't be validated.

    Attributes:
        geo_point (google.ads.googleads.v8.common.types.GeoPointInfo):
            Latitude and longitude.
        radius (float):
            The radius of the proximity.
        radius_units (google.ads.googleads.v8.enums.types.ProximityRadiusUnitsEnum.ProximityRadiusUnits):
            The unit of measurement of the radius.
            Default is KILOMETERS.
        address (google.ads.googleads.v8.common.types.AddressInfo):
            Full address.
    """

    geo_point = proto.Field(proto.MESSAGE, number=1, message="GeoPointInfo",)
    radius = proto.Field(proto.DOUBLE, number=5, optional=True,)
    radius_units = proto.Field(
        proto.ENUM,
        number=3,
        enum=proximity_radius_units.ProximityRadiusUnitsEnum.ProximityRadiusUnits,
    )
    address = proto.Field(proto.MESSAGE, number=4, message="AddressInfo",)


class GeoPointInfo(proto.Message):
    r"""Geo point for proximity criterion.
    Attributes:
        longitude_in_micro_degrees (int):
            Micro degrees for the longitude.
        latitude_in_micro_degrees (int):
            Micro degrees for the latitude.
    """

    longitude_in_micro_degrees = proto.Field(
        proto.INT32, number=3, optional=True,
    )
    latitude_in_micro_degrees = proto.Field(
        proto.INT32, number=4, optional=True,
    )


class AddressInfo(proto.Message):
    r"""Address for proximity criterion.
    Attributes:
        postal_code (str):
            Postal code.
        province_code (str):
            Province or state code.
        country_code (str):
            Country code.
        province_name (str):
            Province or state name.
        street_address (str):
            Street address line 1.
        street_address2 (str):
            Street address line 2. This field is write-only. It is only
            used for calculating the longitude and latitude of an
            address when geo_point is empty.
        city_name (str):
            Name of the city.
    """

    postal_code = proto.Field(proto.STRING, number=8, optional=True,)
    province_code = proto.Field(proto.STRING, number=9, optional=True,)
    country_code = proto.Field(proto.STRING, number=10, optional=True,)
    province_name = proto.Field(proto.STRING, number=11, optional=True,)
    street_address = proto.Field(proto.STRING, number=12, optional=True,)
    street_address2 = proto.Field(proto.STRING, number=13, optional=True,)
    city_name = proto.Field(proto.STRING, number=14, optional=True,)


class TopicInfo(proto.Message):
    r"""A topic criterion. Use topics to target or exclude placements
    in the Google Display Network based on the category into which
    the placement falls (for example, "Pets &
    Animals/Pets/Dogs").

    Attributes:
        topic_constant (str):
            The Topic Constant resource name.
        path (Sequence[str]):
            The category to target or exclude. Each
            subsequent element in the array describes a more
            specific sub-category. For example, "Pets &
            Animals", "Pets", "Dogs" represents the "Pets &
            Animals/Pets/Dogs" category.
    """

    topic_constant = proto.Field(proto.STRING, number=3, optional=True,)
    path = proto.RepeatedField(proto.STRING, number=4,)


class LanguageInfo(proto.Message):
    r"""A language criterion.
    Attributes:
        language_constant (str):
            The language constant resource name.
    """

    language_constant = proto.Field(proto.STRING, number=2, optional=True,)


class IpBlockInfo(proto.Message):
    r"""An IpBlock criterion used for IP exclusions. We allow:

    -  IPv4 and IPv6 addresses
    -  individual addresses (192.168.0.1)
    -  masks for individual addresses (192.168.0.1/32)
    -  masks for Class C networks (192.168.0.1/24)

    Attributes:
        ip_address (str):
            The IP address of this IP block.
    """

    ip_address = proto.Field(proto.STRING, number=2, optional=True,)


class ContentLabelInfo(proto.Message):
    r"""Content Label for category exclusion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.ContentLabelTypeEnum.ContentLabelType):
            Content label type, required for CREATE
            operations.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=content_label_type.ContentLabelTypeEnum.ContentLabelType,
    )


class CarrierInfo(proto.Message):
    r"""Represents a Carrier Criterion.
    Attributes:
        carrier_constant (str):
            The Carrier constant resource name.
    """

    carrier_constant = proto.Field(proto.STRING, number=2, optional=True,)


class UserInterestInfo(proto.Message):
    r"""Represents a particular interest-based topic to be targeted.
    Attributes:
        user_interest_category (str):
            The UserInterest resource name.
    """

    user_interest_category = proto.Field(
        proto.STRING, number=2, optional=True,
    )


class WebpageInfo(proto.Message):
    r"""Represents a criterion for targeting webpages of an
    advertiser's website.

    Attributes:
        criterion_name (str):
            The name of the criterion that is defined by
            this parameter. The name value will be used for
            identifying, sorting and filtering criteria with
            this type of parameters.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
        conditions (Sequence[google.ads.googleads.v8.common.types.WebpageConditionInfo]):
            Conditions, or logical expressions, for
            webpage targeting. The list of webpage targeting
            conditions are and-ed together when evaluated
            for targeting.
            This field is required for CREATE operations and
            is prohibited on UPDATE operations.
        coverage_percentage (float):
            Website criteria coverage percentage. This is
            the computed percentage of website coverage
            based on the website target, negative website
            target and negative keywords in the ad group and
            campaign. For instance, when coverage returns as
            1, it indicates it has 100% coverage. This field
            is read-only.
        sample (google.ads.googleads.v8.common.types.WebpageSampleInfo):
            List of sample urls that match the website
            target. This field is read-only.
    """

    criterion_name = proto.Field(proto.STRING, number=3, optional=True,)
    conditions = proto.RepeatedField(
        proto.MESSAGE, number=2, message="WebpageConditionInfo",
    )
    coverage_percentage = proto.Field(proto.DOUBLE, number=4,)
    sample = proto.Field(proto.MESSAGE, number=5, message="WebpageSampleInfo",)


class WebpageConditionInfo(proto.Message):
    r"""Logical expression for targeting webpages of an advertiser's
    website.

    Attributes:
        operand (google.ads.googleads.v8.enums.types.WebpageConditionOperandEnum.WebpageConditionOperand):
            Operand of webpage targeting condition.
        operator (google.ads.googleads.v8.enums.types.WebpageConditionOperatorEnum.WebpageConditionOperator):
            Operator of webpage targeting condition.
        argument (str):
            Argument of webpage targeting condition.
    """

    operand = proto.Field(
        proto.ENUM,
        number=1,
        enum=webpage_condition_operand.WebpageConditionOperandEnum.WebpageConditionOperand,
    )
    operator = proto.Field(
        proto.ENUM,
        number=2,
        enum=webpage_condition_operator.WebpageConditionOperatorEnum.WebpageConditionOperator,
    )
    argument = proto.Field(proto.STRING, number=4, optional=True,)


class WebpageSampleInfo(proto.Message):
    r"""List of sample urls that match the website target
    Attributes:
        sample_urls (Sequence[str]):
            Webpage sample urls
    """

    sample_urls = proto.RepeatedField(proto.STRING, number=1,)


class OperatingSystemVersionInfo(proto.Message):
    r"""Represents an operating system version to be targeted.
    Attributes:
        operating_system_version_constant (str):
            The operating system version constant
            resource name.
    """

    operating_system_version_constant = proto.Field(
        proto.STRING, number=2, optional=True,
    )


class AppPaymentModelInfo(proto.Message):
    r"""An app payment model criterion.
    Attributes:
        type_ (google.ads.googleads.v8.enums.types.AppPaymentModelTypeEnum.AppPaymentModelType):
            Type of the app payment model.
    """

    type_ = proto.Field(
        proto.ENUM,
        number=1,
        enum=app_payment_model_type.AppPaymentModelTypeEnum.AppPaymentModelType,
    )


class MobileDeviceInfo(proto.Message):
    r"""A mobile device criterion.
    Attributes:
        mobile_device_constant (str):
            The mobile device constant resource name.
    """

    mobile_device_constant = proto.Field(
        proto.STRING, number=2, optional=True,
    )


class CustomAffinityInfo(proto.Message):
    r"""A custom affinity criterion. A criterion of this type is only
    targetable.

    Attributes:
        custom_affinity (str):
            The CustomInterest resource name.
    """

    custom_affinity = proto.Field(proto.STRING, number=2, optional=True,)


class CustomIntentInfo(proto.Message):
    r"""A custom intent criterion. A criterion of this type is only
    targetable.

    Attributes:
        custom_intent (str):
            The CustomInterest resource name.
    """

    custom_intent = proto.Field(proto.STRING, number=2, optional=True,)


class LocationGroupInfo(proto.Message):
    r"""A radius around a list of locations specified via a feed.
    Attributes:
        feed (str):
            Feed specifying locations for targeting.
            This is required and must be set in CREATE
            operations.
        geo_target_constants (Sequence[str]):
            Geo target constant(s) restricting the scope
            of the geographic area within the feed.
            Currently only one geo target constant is
            allowed.
        radius (int):
            Distance in units specifying the radius
            around targeted locations. This is required and
            must be set in CREATE operations.
        radius_units (google.ads.googleads.v8.enums.types.LocationGroupRadiusUnitsEnum.LocationGroupRadiusUnits):
            Unit of the radius. Miles and meters are
            supported for geo target constants. Milli miles
            and meters are supported for feed item sets.
            This is required and must be set in CREATE
            operations.
        feed_item_sets (Sequence[str]):
            FeedItemSets whose FeedItems are targeted. If multiple IDs
            are specified, then all items that appear in at least one
            set are targeted. This field cannot be used with
            geo_target_constants. This is optional and can only be set
            in CREATE operations.
    """

    feed = proto.Field(proto.STRING, number=5, optional=True,)
    geo_target_constants = proto.RepeatedField(proto.STRING, number=6,)
    radius = proto.Field(proto.INT64, number=7, optional=True,)
    radius_units = proto.Field(
        proto.ENUM,
        number=4,
        enum=location_group_radius_units.LocationGroupRadiusUnitsEnum.LocationGroupRadiusUnits,
    )
    feed_item_sets = proto.RepeatedField(proto.STRING, number=8,)


class CustomAudienceInfo(proto.Message):
    r"""A custom audience criterion.
    Attributes:
        custom_audience (str):
            The CustomAudience resource name.
    """

    custom_audience = proto.Field(proto.STRING, number=1,)


class CombinedAudienceInfo(proto.Message):
    r"""A combined audience criterion.
    Attributes:
        combined_audience (str):
            The CombinedAudience resource name.
    """

    combined_audience = proto.Field(proto.STRING, number=1,)


class KeywordThemeInfo(proto.Message):
    r"""A Smart Campaign keyword theme.
    Attributes:
        keyword_theme_constant (str):
            The resource name of a Smart Campaign keyword theme
            constant.
            ``keywordThemeConstants/{keyword_theme_id}~{sub_keyword_theme_id}``
        free_form_keyword_theme (str):
            Free-form text to be matched to a Smart
            Campaign keyword theme constant on a best-effort
            basis.
    """

    # These two fields share the "keyword_theme" oneof: set exactly one.
    keyword_theme_constant = proto.Field(
        proto.STRING, number=1, oneof="keyword_theme",
    )
    free_form_keyword_theme = proto.Field(
        proto.STRING, number=2, oneof="keyword_theme",
    )


__all__ = tuple(sorted(__protobuf__.manifest))
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, unicode_literals, division, print_function) import inspect from copy import deepcopy import pytest import numpy as np from numpy.testing.utils import assert_allclose, assert_array_equal from ...extern.six.moves import cPickle as pickle from ..core import Model, ModelDefinitionError from ..parameters import Parameter from ..models import (Const1D, Shift, Scale, Rotation2D, Gaussian1D, Gaussian2D, Polynomial1D, Polynomial2D, Chebyshev2D, Legendre2D, Chebyshev1D, Legendre1D, AffineTransformation2D, Identity, Mapping) @pytest.mark.parametrize(('expr', 'result'), [(lambda x, y: x + y, 5.0), (lambda x, y: x - y, -1.0), (lambda x, y: x * y, 6.0), (lambda x, y: x / y, 2.0 / 3.0), (lambda x, y: x ** y, 8.0)]) def test_two_model_class_arithmetic_1d(expr, result): # Const1D is perhaps the simplest model to test basic arithmetic with. # TODO: Should define more tests later on for more complicated # combinations of models S = expr(Const1D, Const1D) assert issubclass(S, Model) assert S.n_inputs == 1 assert S.n_outputs == 1 # Initialize an instance of the model, providing values for the two # "amplitude" parameters s = S(2, 3) # It shouldn't matter what input we evaluate on since this is a constant # function out = s(0) assert out == result assert isinstance(out, float) @pytest.mark.parametrize(('expr', 'result'), [(lambda x, y: x + y, [5.0, 5.0]), (lambda x, y: x - y, [-1.0, -1.0]), (lambda x, y: x * y, [6.0, 6.0]), (lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]), (lambda x, y: x ** y, [8.0, 8.0])]) def test_model_set(expr, result): s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2)) out = s(0, model_set_axis=False) assert_array_equal(out, result) @pytest.mark.parametrize(('expr', 'result'), [(lambda x, y: x + y, [5.0, 5.0]), (lambda x, y: x - y, [-1.0, -1.0]), (lambda x, y: x * y, [6.0, 6.0]), (lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]), (lambda x, y: x ** y, 
[8.0, 8.0])]) def test_model_set_raises_value_error(expr, result): """Check that creating model sets with components whose _n_models are different raise a value error """ with pytest.raises(ValueError): s = expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1)) @pytest.mark.parametrize(('expr', 'result'), [(lambda x, y: x + y, 5.0), (lambda x, y: x - y, -1.0), (lambda x, y: x * y, 6.0), (lambda x, y: x / y, 2.0 / 3.0), (lambda x, y: x ** y, 8.0)]) def test_two_model_instance_arithmetic_1d(expr, result): """ Like test_two_model_class_arithmetic_1d, but creates a new model from two model *instances* with fixed parameters. """ s = expr(Const1D(2), Const1D(3)) assert isinstance(s, Model) assert s.n_inputs == 1 assert s.n_outputs == 1 out = s(0) assert out == result assert isinstance(out, float) @pytest.mark.parametrize(('expr', 'result'), [(lambda x, y: x + y, 5.0), (lambda x, y: x - y, -1.0), (lambda x, y: x * y, 6.0), (lambda x, y: x / y, 2.0 / 3.0), (lambda x, y: x ** y, 8.0)]) def test_two_model_mixed_arithmetic_1d(expr, result): """ Like test_two_model_class_arithmetic_1d, but creates a new model from an expression of one model class with one model instance (and vice-versa). """ S1 = expr(Const1D, Const1D(3)) S2 = expr(Const1D(2), Const1D) for cls in (S1, S2): assert issubclass(cls, Model) assert cls.n_inputs == 1 assert cls.n_outputs == 1 # Requires values for both amplitudes even though one of them them has a # default # TODO: We may wish to fix that eventually, so that if a parameter has a # default it doesn't *have* to be given in the init s1 = S1(2, 3) s2 = S2(2, 3) for out in (s1(0), s2(0)): assert out == result assert isinstance(out, float) def test_simple_two_model_class_compose_1d(): """ Shift and Scale are two of the simplest models to test model composition with. 
""" S1 = Shift | Scale # First shift then scale assert issubclass(S1, Model) assert S1.n_inputs == 1 assert S1.n_outputs == 1 s1 = S1(2, 3) # Shift by 2 and scale by 3 assert s1(1) == 9.0 S2 = Scale | Shift # First scale then shift assert issubclass(S2, Model) assert S2.n_inputs == 1 assert S2.n_outputs == 1 s2 = S2(2, 3) # Scale by 2 then shift by 3 assert s2(1) == 5.0 # Test with array inputs assert_array_equal(s2([1, 2, 3]), [5.0, 7.0, 9.0]) def test_simple_two_model_class_compose_2d(): """ A simple example consisting of two rotations. """ R = Rotation2D | Rotation2D assert issubclass(R, Model) assert R.n_inputs == 2 assert R.n_outputs == 2 r1 = R(45, 45) # Rotate twice by 45 degrees assert_allclose(r1(0, 1), (-1, 0), atol=1e-10) r2 = R(90, 90) # Rotate twice by 90 degrees assert_allclose(r2(0, 1), (0, -1), atol=1e-10) # Compose R with itself to produce 4 rotations R2 = R | R r3 = R2(45, 45, 45, 45) assert_allclose(r3(0, 1), (0, -1), atol=1e-10) def test_n_submodels(): """ Test that CompoundModel.n_submodels properly returns the number of components. """ g2 = Gaussian1D() + Gaussian1D() assert g2.n_submodels() == 2 g3 = g2 + Gaussian1D() assert g3.n_submodels() == 3 g5 = g3 | g2 assert g5.n_submodels() == 5 g7 = g5 / g2 assert g7.n_submodels() == 7 # make sure it works as class method p = Polynomial1D + Polynomial1D assert p.n_submodels() == 2 def test_expression_formatting(): """ Test that the expression strings from compound models are formatted correctly. 
""" # For the purposes of this test it doesn't matter a great deal what # model(s) are used in the expression, I don't think G = Gaussian1D G2 = Gaussian2D M = G + G assert M._format_expression() == '[0] + [1]' M = G + G + G assert M._format_expression() == '[0] + [1] + [2]' M = G + G * G assert M._format_expression() == '[0] + [1] * [2]' M = G * G + G assert M._format_expression() == '[0] * [1] + [2]' M = G + G * G + G assert M._format_expression() == '[0] + [1] * [2] + [3]' M = (G + G) * (G + G) assert M._format_expression() == '([0] + [1]) * ([2] + [3])' # This example uses parentheses in the expression, but those won't be # preserved in the expression formatting since they technically aren't # necessary, and there's no way to know that they were originally # parenthesized (short of some deep, and probably not worthwhile # introspection) M = (G * G) + (G * G) assert M._format_expression() == '[0] * [1] + [2] * [3]' M = G ** G assert M._format_expression() == '[0] ** [1]' M = G + G ** G assert M._format_expression() == '[0] + [1] ** [2]' M = (G + G) ** G assert M._format_expression() == '([0] + [1]) ** [2]' M = G + G | G assert M._format_expression() == '[0] + [1] | [2]' M = G + (G | G) assert M._format_expression() == '[0] + ([1] | [2])' M = G & G | G2 assert M._format_expression() == '[0] & [1] | [2]' M = G & (G | G) assert M._format_expression() == '[0] & ([1] | [2])' def test_indexing_on_class(): """ Test indexing on compound model class objects, including cases where the submodels are classes, as well as instances, or both. 
""" g = Gaussian1D(1, 2, 3, name='g') p = Polynomial1D(2, name='p') M = Gaussian1D + Const1D assert M[0] is Gaussian1D assert M[1] is Const1D assert M['Gaussian1D'] is M[0] assert M['Const1D'] is M[1] M = Gaussian1D + p assert M[0] is Gaussian1D assert isinstance(M['p'], Polynomial1D) m = g + p assert isinstance(m[0], Gaussian1D) assert isinstance(m[1], Polynomial1D) assert isinstance(m['g'], Gaussian1D) assert isinstance(m['p'], Polynomial1D) # Test negative indexing assert isinstance(m[-1], Polynomial1D) assert isinstance(m[-2], Gaussian1D) with pytest.raises(IndexError): m[42] with pytest.raises(IndexError): m['foobar'] # TODO: It would be good if there were an easier way to interrogate a compound # model class for what expression it represents. Not sure what that would look # like though. def test_slicing_on_class(): """ Test slicing a simple compound model class using integers. """ A = Const1D.rename('A') B = Const1D.rename('B') C = Const1D.rename('C') D = Const1D.rename('D') E = Const1D.rename('E') F = Const1D.rename('F') M = A + B - C * D / E ** F assert M[0:1] is A # This test will also check that the correct parameter names are generated # for each slice (fairly trivial in this case since all the submodels have # the same parameter, but if any corner cases are found that aren't covered # by this test we can do something different...) assert M[0:1].param_names == ('amplitude',) # This looks goofy but if you slice by name to the sub-model of the same # name it should just return that model, logically. 
assert M['A':'A'] is A assert M['A':'A'].param_names == ('amplitude',) assert M[5:6] is F assert M[5:6].param_names == ('amplitude',) assert M['F':'F'] is F assert M['F':'F'].param_names == ('amplitude',) # 1 + 2 assert M[:2](1, 2)(0) == 3 assert M[:2].param_names == ('amplitude_0', 'amplitude_1') assert M[:'B'](1, 2)(0) == 3 assert M[:'B'].param_names == ('amplitude_0', 'amplitude_1') # 2 - 3 assert M[1:3](2, 3)(0) == -1 assert M[1:3].param_names == ('amplitude_1', 'amplitude_2') assert M['B':'C'](2, 3)(0) == -1 assert M['B':'C'].param_names == ('amplitude_1', 'amplitude_2') # 3 * 4 assert M[2:4](3, 4)(0) == 12 assert M[2:4].param_names == ('amplitude_2', 'amplitude_3') assert M['C':'D'](3, 4)(0) == 12 assert M['C':'D'].param_names == ('amplitude_2', 'amplitude_3') # 4 / 5 assert M[3:5](4, 5)(0) == 0.8 assert M[3:5].param_names == ('amplitude_3', 'amplitude_4') assert M['D':'E'](4, 5)(0) == 0.8 assert M['D':'E'].param_names == ('amplitude_3', 'amplitude_4') # 5 ** 6 assert M[4:6](5, 6)(0) == 15625 assert M[4:6].param_names == ('amplitude_4', 'amplitude_5') assert M['E':'F'](5, 6)(0) == 15625 assert M['E':'F'].param_names == ('amplitude_4', 'amplitude_5') def test_slicing_on_instance(): """ Test slicing a simple compound model class using integers. 
""" A = Const1D.rename('A') B = Const1D.rename('B') C = Const1D.rename('C') D = Const1D.rename('D') E = Const1D.rename('E') F = Const1D.rename('F') M = A + B - C * D / E ** F m = M(1, 2, 3, 4, 5, 6) assert isinstance(m[0:1], A) assert isinstance(m['A':'A'], A) assert isinstance(m[5:6], F) assert isinstance(m['F':'F'], F) # 1 + 2 assert m[:'B'](0) == 3 assert m[:'B'].param_names == ('amplitude_0', 'amplitude_1') assert np.all(m[:'B'].parameters == [1, 2]) # 2 - 3 assert m['B':'C'](0) == -1 assert m['B':'C'].param_names == ('amplitude_1', 'amplitude_2') assert np.all(m['B':'C'].parameters == [2, 3]) # 3 * 4 assert m['C':'D'](0) == 12 assert m['C':'D'].param_names == ('amplitude_2', 'amplitude_3') assert np.all(m['C':'D'].parameters == [3, 4]) # 4 / 5 assert m['D':'E'](0) == 0.8 assert m['D':'E'].param_names == ('amplitude_3', 'amplitude_4') assert np.all(m['D':'E'].parameters == [4, 5]) # 5 ** 6 assert m['E':'F'](0) == 15625 assert m['E':'F'].param_names == ('amplitude_4', 'amplitude_5') assert np.all(m['E':'F'].parameters == [5, 6]) def test_indexing_on_instance(): """Test indexing on compound model instances.""" M = Gaussian1D + Const1D m = M(1, 0, 0.1, 2) assert isinstance(m[0], Gaussian1D) assert isinstance(m[1], Const1D) assert isinstance(m['Gaussian1D'], Gaussian1D) assert isinstance(m['Const1D'], Const1D) # Test parameter equivalence assert m[0].amplitude == 1 == m.amplitude_0 assert m[0].mean == 0 == m.mean_0 assert m[0].stddev == 0.1 == m.stddev_0 assert m[1].amplitude == 2 == m.amplitude_1 # Test that parameter value updates are symmetric between the compound # model and the submodel returned by indexing const = m[1] m.amplitude_1 = 42 assert const.amplitude == 42 const.amplitude = 137 assert m.amplitude_1 == 137 # Similar couple of tests, but now where the compound model was created # from model instances g = Gaussian1D(1, 2, 3, name='g') p = Polynomial1D(2, name='p') m = g + p assert m[0].name == 'g' assert m[1].name == 'p' assert m['g'].name == 'g' 
assert m['p'].name == 'p' poly = m[1] m.c0_1 = 12345 assert poly.c0 == 12345 poly.c1 = 6789 assert m.c1_1 == 6789 # Ensure this did *not* modify the original models we used as templates assert p.c0 == 0 assert p.c1 == 0 # Test negative indexing assert isinstance(m[-1], Polynomial1D) assert isinstance(m[-2], Gaussian1D) with pytest.raises(IndexError): m[42] with pytest.raises(IndexError): m['foobar'] def test_basic_compound_inverse(): """ Test basic inversion of compound models in the limited sense supported for models made from compositions and joins only. """ t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90) assert_allclose(t.inverse(*t(0, 1)), (0, 1)) @pytest.mark.parametrize('model', [ Shift(0) + Shift(0) | Shift(0), Shift(0) - Shift(0) | Shift(0), Shift(0) * Shift(0) | Shift(0), Shift(0) / Shift(0) | Shift(0), Shift(0) ** Shift(0) | Shift(0), Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)]) def test_compound_unsupported_inverse(model): """ Ensure inverses aren't supported in cases where it shouldn't be. """ with pytest.raises(NotImplementedError): model.inverse def test_mapping_basic_permutations(): """ Tests a couple basic examples of the Mapping model--specifically examples that merely permute the outputs. """ x, y = Rotation2D(90)(1, 2) RS = Rotation2D | Mapping((1, 0)) x_prime, y_prime = RS(90)(1, 2) assert_allclose((x, y), (y_prime, x_prime)) # A more complicated permutation M = Rotation2D & Scale m = M(90, 2) x, y, z = m(1, 2, 3) MS = M | Mapping((2, 0, 1)) ms = MS(90, 2) x_prime, y_prime, z_prime = ms(1, 2, 3) assert_allclose((x, y, z), (y_prime, z_prime, x_prime)) def test_mapping_inverse(): """Tests inverting a compound model that includes a `Mapping`.""" RS = Rotation2D & Scale # Rotates 2 of the coordinates and scales the third--then rotates on a # different axis and scales on the axis of rotation. 
No physical meaning # here just a simple test M = RS | Mapping([2, 0, 1]) | RS m = M(12.1, 13.2, 14.3, 15.4) assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08) def test_identity_input(): """ Test a case where an Identity (or Mapping) model is the first in a chain of composite models and thus is responsible for handling input broadcasting properly. Regression test for https://github.com/astropy/astropy/pull/3362 """ ident1 = Identity(1) shift = Shift(1) rotation = Rotation2D(angle=90) model = ident1 & shift | rotation assert_allclose(model(1, 2), [-3.0, 1.0]) # Same test case but using class composition TestModel = ident1 & Shift | Rotation2D model = TestModel(offset_1=1, angle_2=90) assert_allclose(model(1, 2), [-3.0, 1.0]) def test_slicing_on_instances_2(): """ More slicing tests. Regression test for https://github.com/embray/astropy/pull/10 """ model_a = Shift(1, name='a') model_b = Shift(2, name='b') model_c = Rotation2D(3, name='c') model_d = Scale(2, name='d') model_e = Scale(3, name='e') m = (model_a & model_b) | model_c | (model_d & model_e) with pytest.raises(ModelDefinitionError): # The slice can't actually be taken since the resulting model cannot be # evaluated assert m[1:].submodel_names == ('b', 'c', 'd', 'e') assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e') assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e') with pytest.raises(ModelDefinitionError): assert m['c':'d'].submodel_names == ('c', 'd') assert m[1:2].name == 'b' assert m[2:7].submodel_names == ('c', 'd', 'e') with pytest.raises(IndexError): m['x'] with pytest.raises(IndexError): m['a' : 'r'] with pytest.raises(ModelDefinitionError): assert m[-4:4].submodel_names == ('b', 'c', 'd') with pytest.raises(ModelDefinitionError): assert m[-4:-2].submodel_names == ('b', 'c') def test_slicing_on_instances_3(): """ Like `test_slicing_on_instances_2` but uses a compound model that does not have any invalid slices due to the resulting model being invalid (originally 
test_slicing_on_instances_2 passed without any ModelDefinitionErrors being raised, but that was before we prevented invalid models from being created). """ model_a = Shift(1, name='a') model_b = Shift(2, name='b') model_c = Gaussian1D(3, 0, 0.1, name='c') model_d = Scale(2, name='d') model_e = Scale(3, name='e') m = (model_a + model_b) | model_c | (model_d + model_e) assert m[1:].submodel_names == ('b', 'c', 'd', 'e') assert m[:].submodel_names == ('a', 'b', 'c', 'd', 'e') assert m['a':].submodel_names == ('a', 'b', 'c', 'd', 'e') assert m['c':'d'].submodel_names == ('c', 'd') assert m[1:2].name == 'b' assert m[2:7].submodel_names == ('c', 'd', 'e') with pytest.raises(IndexError): m['x'] with pytest.raises(IndexError): m['a' : 'r'] assert m[-4:4].submodel_names == ('b', 'c', 'd') assert m[-4:-2].submodel_names == ('b', 'c') def test_slicing_on_instance_with_parameterless_model(): """ Regression test to fix an issue where the indices attached to parameter names on a compound model were not handled properly when one or more submodels have no parameters. This was especially evident in slicing. """ p2 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) p1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) mapping = Mapping((0, 1, 0, 1)) offx = Shift(-2, name='x_translation') offy = Shift(-1, name='y_translation') aff = AffineTransformation2D(matrix=[[1, 2], [3, 4]], name='rotation') model = mapping | (p1 & p2) | (offx & offy) | aff assert model.param_names == ('c0_0_1', 'c1_0_1', 'c0_1_1', 'c0_0_2', 'c1_0_2', 'c0_1_2', 'offset_3', 'offset_4', 'matrix_5', 'translation_5') assert model(1, 2) == (23.0, 53.0) m = model[3:] assert m.param_names == ('offset_3', 'offset_4', 'matrix_5', 'translation_5') assert m(1, 2) == (1.0, 1.0) def test_compound_model_with_nonstandard_broadcasting(): """ Ensure that the ``standard_broadcasting`` flag is properly propagated when creating compound models. See the commit message for the commit in which this was added for more details. 
""" offx = Shift(1) offy = Shift(2) rot = AffineTransformation2D([[0, -1], [1, 0]]) m = (offx & offy) | rot x, y = m(0, 0) assert x == -2 assert y == 1 # make sure conversion back to scalars is working properly assert isinstance(x, float) assert isinstance(y, float) x, y = m([0, 1, 2], [0, 1, 2]) assert np.all(x == [-2, -3, -4]) assert np.all(y == [1, 2, 3]) def test_compound_model_classify_attributes(): """ Regression test for an issue raised here: https://github.com/astropy/astropy/pull/3231#discussion_r22221123 The issue is that part of the `help` implementation calls a utility function called `inspect.classify_class_attrs`, which was leading to an infinite recursion. This is a useful test in its own right just in that it tests that compound models can be introspected in some useful way without crashing--this works as sort of a test of its somewhat complicated internal state management. This test does not check any of the results of `~inspect.classify_class_attrs`, though it might be useful to at some point. """ inspect.classify_class_attrs(Gaussian1D + Gaussian1D) def test_invalid_operands(): """ Test that certain operators do not work with models whose inputs/outputs do not match up correctly. 
""" with pytest.raises(ModelDefinitionError): Rotation2D | Gaussian1D with pytest.raises(ModelDefinitionError): Rotation2D(90) | Gaussian1D(1, 0, 0.1) with pytest.raises(ModelDefinitionError): Rotation2D + Gaussian1D with pytest.raises(ModelDefinitionError): Rotation2D(90) + Gaussian1D(1, 0, 0.1) class _ConstraintsTestA(Model): stddev = Parameter(default=0, min=0, max=0.3) mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(stddev, mean): return stddev, mean class _ConstraintsTestB(Model): mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(mean): return mean @pytest.mark.parametrize('model', [Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) + Gaussian1D(fixed={'mean': True}), (_ConstraintsTestA + _ConstraintsTestB)()]) def test_inherit_constraints(model): """ Various tests for copying of constraint values between compound models and their members. There are two versions of this test: One where a compound model is created from two model instances, and another where a compound model is created from two model classes that have default constraints set on some of their parameters. Regression test for https://github.com/astropy/astropy/issues/3481 """ # We have to copy the model before modifying it, otherwise the test fails # if it is run twice in a row, because the state of the model instance # would be preserved from one run to the next. model = deepcopy(model) # Lots of assertions in this test as there are multiple interfaces to # parameter constraints assert 'stddev_0' in model.bounds assert model.bounds['stddev_0'] == (0, 0.3) assert model.stddev_0.bounds == (0, 0.3) assert 'mean_0' in model.fixed assert model.fixed['mean_0'] is True assert model.mean_0.fixed is True assert 'mean_1' in model.fixed assert model.fixed['mean_1'] is True assert model.mean_1.fixed is True # Great, all the constraints were inherited properly # Now what about if we update them through the sub-models? 
model[0].stddev.bounds = (0, 0.4) assert model.bounds['stddev_0'] == (0, 0.4) assert model.stddev_0.bounds == (0, 0.4) assert model[0].stddev.bounds == (0, 0.4) assert model[0].bounds['stddev'] == (0, 0.4) model[0].bounds['stddev'] = (0.1, 0.5) assert model.bounds['stddev_0'] == (0.1, 0.5) assert model.stddev_0.bounds == (0.1, 0.5) assert model[0].stddev.bounds == (0.1, 0.5) assert model[0].bounds['stddev'] == (0.1, 0.5) model[1].mean.fixed = False assert model.fixed['mean_1'] is False assert model.mean_1.fixed is False assert model[1].mean.fixed is False assert model[1].fixed['mean'] is False model[1].fixed['mean'] = True assert model.fixed['mean_1'] is True assert model.mean_1.fixed is True assert model[1].mean.fixed is True assert model[1].fixed['mean'] is True def test_compound_custom_inverse(): """ Test that a compound model with a custom inverse has that inverse applied when the inverse of another model, of which it is a component, is computed. Regression test for https://github.com/astropy/astropy/issues/3542 """ poly = Polynomial1D(1, c0=1, c1=2) scale = Scale(1) shift = Shift(1) model1 = poly | scale model1.inverse = poly # model1 now has a custom inverse (the polynomial itself, ignoring the # trivial scale factor) model2 = shift | model1 assert_allclose(model2.inverse(1), (poly | shift.inverse)(1)) # Make sure an inverse is not allowed if the models were combined with the # wrong operator, or if one of the models doesn't have an inverse defined with pytest.raises(NotImplementedError): (shift + model1).inverse with pytest.raises(NotImplementedError): (model1 & poly).inverse @pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2), Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)]) def test_compound_with_polynomials(poly): """ Tests that polynomials are scaled when used in compound models. 
Issue #3699 """ poly.parameters = [1, 2, 3, 4, 1, 2] shift = Shift(3) model = poly | shift x, y = np.mgrid[:20, :37] result_compound = model(x, y) result = shift(poly(x, y)) assert_allclose(result, result_compound) # has to be defined at module level since pickling doesn't work right (in # general) for classes defined in functions class _TestPickleModel(Gaussian1D + Gaussian1D): pass @pytest.mark.skipif(str("sys.version_info < (2, 7, 3)")) def test_pickle_compound(): """ Regression test for https://github.com/astropy/astropy/issues/3867#issuecomment-114547228 """ # Test pickling a compound model class GG = Gaussian1D + Gaussian1D GG2 = pickle.loads(pickle.dumps(GG)) assert GG.param_names == GG2.param_names assert GG.__name__ == GG2.__name__ # Test that it works, or at least evaluates successfully assert GG()(0.12345) == GG2()(0.12345) # Test pickling a compound model instance g1 = Gaussian1D(1.0, 0.0, 0.1) g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3]) m = g1 + g2 m2 = pickle.loads(pickle.dumps(m)) assert m.param_names == m2.param_names assert m.__class__.__name__ == m2.__class__.__name__ assert np.all(m.parameters == m2.parameters) assert np.all(m(0) == m2(0)) # Test pickling a concrete class p = pickle.dumps(_TestPickleModel, protocol=0) # Note: This is very dependent on the specific protocol, but the point of # this test is that the "concrete" model is pickled in a very simple way # that only specifies the module and class name, and is unpickled by # re-importing the class from the module in which it was defined. This # should still work for concrete subclasses of compound model classes that # were dynamically generated through an expression exp = b'castropy.modeling.tests.test_compound\n_TestPickleModel\np0\n.' 
# When testing against the expected value we drop the memo length field # at the end, which may differ between runs assert p[:p.rfind(b'p')] == exp[:exp.rfind(b'p')] assert pickle.loads(p) is _TestPickleModel @pytest.mark.skipif(str("sys.version_info >= (2, 7, 3)")) def test_pickle_compound_fallback(): """ Test fallback for pickling compound model on old versions of Python affected by http://bugs.python.org/issue7689 """ gg = (Gaussian1D + Gaussian1D)() with pytest.raises(RuntimeError): pickle.dumps(gg) def test_update_parameters(): offx = Shift(1) scl = Scale(2) m = offx | scl assert(m(1) == 4) offx.offset=42 assert(m(1) == 4) m.factor_1 = 100 assert(m(1) == 200) m2 = m | offx assert(m2(1) == 242) def test_name(): offx = Shift(1) scl = Scale(2) m = offx | scl scl.name = "scale" assert m._submodel_names == ('None_0', 'None_1') assert m.name is None m.name = "M" assert m.name == "M" m1 = m.rename("M1") assert m.name == "M" assert m1.name == "M1"
""" Functions to help configure LAtools. (c) Oscar Branson : https://github.com/oscarbranson """ import configparser import json import os import re import numpy as np import textwrap as tw import pkg_resources as pkgrs from .utils import Bunch from .metadata_parsers import meta_parsers from io import BytesIO from shutil import copyfile line_width = 80 # functions used by latools to read configurations def read_configuration(config='DEFAULT'): """ Read LAtools configuration file, and return parameters as dict. """ # read configuration file _, conf = read_latoolscfg() # if 'DEFAULT', check which is the default configuration if config == 'DEFAULT': config = conf['DEFAULT']['config'] try: # grab the chosen configuration conf = dict(conf[config]) # update config name with chosen conf['config'] = config return conf except KeyError: msg = "\nError: '{}' is not a valid configuration.\n".format(config) msg += 'Please use one of:\n DEFAULT\n ' + '\n '.join(conf.sections()) print(msg + '\n') raise KeyError('Invalid configuration: {}'.format(config)) def config_locator(): """ Returns the path to the file containing your LAtools configurations. """ return pkgrs.resource_filename('latools', 'latools.cfg') # under-the-hood functions def read_latoolscfg(): """ Reads configuration, returns a ConfigParser object. Distinct from read_configuration, which returns a dict. """ config_file = pkgrs.resource_filename('latools', 'latools.cfg') cf = configparser.ConfigParser() cf.read(config_file) return config_file, cf # convenience functions for configuring LAtools def locate(): """ Prints and returns the location of the latools.cfg file. """ loc = pkgrs.resource_filename('latools', 'latools.cfg') print(loc) return loc def print_all(): """ Prints all currently defined configurations. 
""" # read configuration file _, conf = read_latoolscfg() default = conf['DEFAULT']['config'] pstr = '\nCurrently defined LAtools configurations:\n\n' for s in conf.sections(): if s == default: pstr += s + ' [DEFAULT]\n' elif s == 'REPRODUCE': pstr += s + ' [DO NOT ALTER]\n' else: pstr += s + '\n' for k, v in conf[s].items(): if k != 'config': if v[:9] == 'resources': v = pkgrs.resource_filename('latools', v) pstr += ' ' + k + ': ' + v + '\n' pstr += '\n' print(pstr) return def copy_SRM_file(destination=None, config='DEFAULT'): """ Creates a copy of the default SRM table at the specified location. Parameters ---------- destination : str The save location for the SRM file. If no location specified, saves it as 'LAtools_[config]_SRMTable.csv' in the current working directory. config : str It's possible to set up different configurations with different SRM files. This specifies the name of the configuration that you want to copy the SRM file from. If not specified, the 'DEFAULT' configuration is used. """ # find SRM file from configuration conf = read_configuration() src = pkgrs.resource_filename('latools', conf['srmfile']) # work out destination path (if not given) if destination is None: destination = './LAtools_' + conf['config'] + '_SRMTable.csv' if os.path.isdir(destination): destination += 'LAtools_' + conf['config'] + '_SRMTable.csv' copyfile(src, destination) print(src + ' \n copied to:\n ' + destination) return def create(config_name, srmfile=None, dataformat=None, base_on='DEFAULT', make_default=False): """ Adds a new configuration to latools.cfg. Parameters ---------- config_name : str The name of the new configuration. This should be descriptive (e.g. UC Davis Foram Group) srmfile : str (optional) The location of the srm file used for calibration. dataformat : str (optional) The location of the dataformat definition to use. base_on : str The name of the existing configuration to base the new one on. 
If either srm_file or dataformat are not specified, the new config will copy this information from the base_on config. make_default : bool Whether or not to make the new configuration the default for future analyses. Default = False. Returns ------- None """ base_config = read_configuration(base_on) # read config file config_file, cf = read_latoolscfg() # if config doesn't already exist, create it. if config_name not in cf.sections(): cf.add_section(config_name) # set parameter values if dataformat is None: dataformat = base_config['dataformat'] cf.set(config_name, 'dataformat', dataformat) if srmfile is None: srmfile = base_config['srmfile'] cf.set(config_name, 'srmfile', srmfile) # make the parameter set default, if requested if make_default: cf.set('DEFAULT', 'config', config_name) with open(config_file, 'w') as f: cf.write(f) return def update(config, parameter, new_value): # read config file config_file, cf = read_latoolscfg() if config == 'REPRODUCE': print("Nope. This will break LAtools. Don't do it.") pstr = 'Are you sure you want to change the {:s} parameter of the {:s} configuration?\n It will be changed from:\n {:s}\n to:\n {:s}\n> [N/y]: ' response = input(pstr.format(parameter, config, cf[config][parameter], new_value)) if response.lower() == 'y': cf.set(config, parameter, new_value) with open(config_file, 'w') as f: cf.write(f) print(' Configuration updated!') else: print(' Done nothing.') return def delete(config): # read config file config_file, cf = read_latoolscfg() if config == cf['DEFAULT']['config']: print("Nope. You're not allowed to delete the default configuration.\n" + "Please change the default configuration, and then try again.") if config == 'REPRODUCE': print("Nope. This will break LAtools. 
Don't do it.") pstr = 'Are you sure you want to delete the {:s} configuration?\n> [N/y]: ' response = input(pstr.format(config)) if response.lower() == 'y': cf.remove_section(config) with open(config_file, 'w') as f: cf.write(f) print(' Configuration deleted!') else: print(' Done nothing.') return def change_default(config): """ Change the default configuration. """ config_file, cf = read_latoolscfg() if config not in cf.sections(): raise ValueError("\n'{:s}' is not a defined configuration.".format(config)) if config == 'REPRODUCE': pstr = ('Are you SURE you want to set REPRODUCE as your default configuration?\n' + ' ... this is an odd thing to be doing.') else: pstr = ('Are you sure you want to change the default configuration from {:s}'.format(cf['DEFAULT']['config']) + 'to {:s}?'.format(config)) response = input(pstr + '\n> [N/y]: ') if response.lower() == 'y': cf.set('DEFAULT', 'config', config) with open(config_file, 'w') as f: cf.write(f) print(' Default changed!') else: print(' Done nothing.') def get_dataformat_template(destination='./LAtools_dataformat_template.json'): """ Copies a data format description JSON template to the specified location. """ template_file = pkgrs.resource_filename('latools', 'resources/data_formats/dataformat_template.json') copyfile(template_file, destination) return # tools for developing a valid dataformat description def test_dataformat(data_file, dataformat_file, name_mode='file_names'): """ Test a data formatfile against a particular data file. This goes through all the steps of data import printing out the results of each step, so you can see where the import fails. Parameters ---------- data_file : str Path to data file, including extension. dataformat : dict or str A dataformat dict, or path to file. See example below. name_mode : str How to identyfy sample names. If 'file_names' uses the input name of the file, stripped of the extension. 
        If 'metadata_names' uses the 'name' attribute of the 'meta' sub-dictionary in dataformat.
        If any other str, uses this str as the sample name.

    Example
    -------
    >>> {'genfromtext_args': {'delimiter': ',',
                              'skip_header': 4},  # passed directly to np.genfromtxt
         'column_id': {'name_row': 3,  # which row contains the column names
                       'delimiter': ',',  # delimeter between column names
                       'timecolumn': 0,  # which column contains the 'time' variable
                       'pattern': '([A-z]{1,2}[0-9]{1,3})'},  # a regex pattern which captures the column names
         'meta_regex': {  # a dict of (line_no: ([descriptors], [regexs])) pairs
                        0: (['path'], '(.*)'),
                        2: (['date', 'method'],  # MUST include date
                            '([A-Z][a-z]+ [0-9]+ [0-9]{4}[ ]+[0-9:]+ [amp]+).* ([A-z0-9]+\.m)')
                        }
        }

    Returns
    -------
    sample, analytes, data, meta : tuple
    """
    # a dataformat dict may be passed directly instead of a file path
    if isinstance(dataformat_file, dict):
        dataformat = dataformat_file
        dataformat_file = '[dict provided]'

    print('*************************************************\n' +
          'Testing suitability of data format description...\n' +
          ' Dataformat File: {:s}'.format(dataformat_file) + '\n' +
          ' Data File: {:s}'.format(data_file) + '\n' +
          '*************************************************')

    # Step 1: the raw data file must be readable as text.
    print('\n Test: open data file...')
    with open(data_file) as f:
        lines = f.readlines()
    print(' Success!')

    # Step 2: if a path was given, the dataformat file must be valid JSON.
    if dataformat_file != '[dict provided]':
        print('\n Test: read dataformat file...')
        # if dataformat is not a dict, load the json
        try:
            with open(dataformat_file) as f:
                dataformat = json.load(f)
            print(' Success!')
        except:
            print(" ***PROBLEM: The dataformat file isn't in a valid .json format")
            raise

    # Step 3: apply each meta_regex entry to its line and collect the
    # captured groups into `meta`. Keys may be line numbers (digits) or
    # 'anything__substring' patterns matched against line content.
    print("\n Test: read metadata using 'metadata_regex'...")
    meta = Bunch()
    if 'meta_regex' in dataformat.keys():
        got = []
        for k, v in dataformat['meta_regex'].items():
            rep = ' Line "{:s}":'.format(k)
            try:
                if k.isdigit():
                    line = lines[int(k)]
                else:
                    # non-numeric key: use the text after '__' to find the line
                    pattern = k.split('__')[-1]
                    for line in lines:
                        if pattern in line:
                            break
                out = re.search(v[-1], line).groups()
                print(rep)
                for i in np.arange(len(v[0])):
                    meta[v[0][i]] = out[i]
                    print(' ' + v[0][i] + ': ' + out[i])
                    got.append(v[0][i])
            except:
                print(rep + '\n ***PROBLEM in meta_regex:\n' +
                      ' [' + ', '.join(v[0]) + '] cannot be derived from "' + v[1] + '".\n' +
                      ' Test regex against: "{:s}"'.format(line.strip()) + '\n' +
                      ' at https://regex101.com/.')
                raise
        print(' Finished - does the above look correct?')
        # 'date' is required downstream for time alignment - warn if missing
        if 'date' not in got:
            print(' ***PROBLEM: ' + 'meta_regex must identify data collection start time as "date". LAtools may not behave as expected.')
            # raise ValueError('meta_regex must identify "date" attribute, containing data collection start time')
    else:
        print(' ***PROBLEM: ' + 'meta_regex not specified. At minimum, must identify data collection start time as "date". LAtools may not behave as expected.')
        # raise ValueError('meta_regex must identify "date" attribute, containing data collection start time')

    # Step 3b: optional custom metadata parser applied to a line range.
    # NOTE(review): `meta_parsers` is defined elsewhere in this module -
    # presumably a registry of named parser functions; verify against caller.
    if 'metaparse_function' in dataformat.keys():
        fn_name, (start, stop) = dataformat['metaparse_function']
        print(f"\n Test: parsing additional metadata using '{fn_name}' function...")
        print(f" applying to lines {start}-{stop}.")
        extra_meta = meta_parsers[fn_name](lines[start:stop])
        print("\n Does the below look correct?")
        print(extra_meta)
        meta.update(extra_meta)

    # sample name
    print('\n Test: Sample Name IDs...')
    if name_mode == 'file_names':
        sample = os.path.basename(data_file).split('.')[0]
        print(' Sample name grabbed from file: ' + sample)
    else:
        try:
            sample = meta['name']
            print(' Sample name from metadata_regex: ' + sample)
        except KeyError:
            print(' ***PROBLEM: Sample name not identified by metadata_regex - please include "name".')
            raise
    print(' ***Is the sample name correct?***')

    # column and analyte names
    print('\n Test: Getting Column Names...')
    if isinstance(dataformat['column_id']['name_row'], int):
        # name_row given as an explicit line number
        print(' Getting names from line {:.0f}: '.format(dataformat['column_id']['name_row']))
        name_row = dataformat['column_id']['name_row']
        column_line = lines[dataformat['column_id']['name_row']]
    elif isinstance(dataformat['column_id']['name_row'], str):
        # name_row given as a substring: use the first line containing it
        print(' Column row not provided, using first line containing "{}": '.format(dataformat['column_id']['name_row']))
        for name_row, column_line in enumerate(lines):
            if dataformat['column_id']['name_row'] in column_line:
                break

    columns = np.array(column_line.strip().split(dataformat['column_id']['delimiter']))
    # return tw.wrap(', '.join(columns), line_width)
    print(' Columns found:\n' + '\n'.join(tw.wrap(', '.join(columns), line_width, subsequent_indent=' ', initial_indent=' ')))

    # optional regex clean-up of raw column headers into analyte names
    if 'pattern' in dataformat['column_id'].keys():
        print(' Cleaning up using column_id/pattern...')
        pr = re.compile(dataformat['column_id']['pattern'])
        analytes = [pr.match(c).groups()[0] for c in columns if pr.match(c)]
        if len(analytes) == 0:
            raise ValueError('no analyte names identified. Check pattern in column_id section.')
        print(' Cleaned Analyte Names: \n' + '\n'.join(tw.wrap(', '.join(analytes), line_width, subsequent_indent=' ', initial_indent=' ')))
        print(' ***This should only contain analyte names... does it?***')

    # do any required pre-formatting
    if 'preformat_replace' in dataformat.keys():
        print('\n Test: preformat_replace...')
        with open(data_file) as f:
            fbuffer = f.read()
        for k, v in dataformat['preformat_replace'].items():
            print(' replacing "' + k + '" with "' + v + '"')
            fbuffer = re.sub(k, v, fbuffer)
        fdata = BytesIO(fbuffer.encode())
    else:
        fdata = data_file
    print(' Done.')

    print('\n Test: Reading Data...')
    # if skip_header not provided, calculate it.
    # Header ends at the first non-blank line after the column-name row.
    if 'skip_header' not in dataformat['genfromtext_args']:
        print(' "skip_header" not specified... finding header size.')
        skip_header = name_row + 1
        while not lines[skip_header].strip():
            skip_header += 1
        dataformat['genfromtext_args']['skip_header'] = skip_header
        print(' -> Header is {} lines long'.format(skip_header))

    # read data
    try:
        read_data = np.genfromtxt(data_file, **dataformat['genfromtext_args']).T
        print(' Success!')
    except:
        print(' ***PROBLEM during data read - check genfromtext_args.\n' +
              ' If they look correct, think about including preformat_replace terms?')
        raise

    # print(' checking number of columns...')
    # if read_data.shape[0] != len(analytes) + 1:
    #     print('***\nPROBLEM:\n' +
    #           'There are {:.0f} data columns, but {:.0f} column names.\n'.format(read_data.shape[0], len(analytes) + 1) +
    #           ' This shouldn't be a problem
    #           ' Check your identification of column names, or your pre-formatting parameters.\n***')
    #     # raise ValueError('Data - Column Name mismatch')
    # else:
    #     print(' Success!')

    # data dict
    # `dind` masks out the time column so read_data[dind] holds analyte rows only
    print('\n Test: Combine data into dictionary...')
    dind = np.ones(read_data.shape[0], dtype=bool)
    dind[dataformat['column_id']['timecolumn']] = False

    data = Bunch()
    print(' Time in column {:.0f} ({:s})'.format(dataformat['column_id']['timecolumn'], columns[dataformat['column_id']['timecolumn']]))
    data['Time'] = read_data[dataformat['column_id']['timecolumn']]

    for a, d in zip(analytes, read_data[dind]):
        data[a] = d

    print(' Calculating total counts...')
    data['total_counts'] = np.nansum(read_data[dind], axis=0)
    print(' Success!')

    print('\nTests completed successfully.\n' +
          ' **This does not necessarily mean everything has worked!**\n' +
          ' Look carefully through the output of this function, and\n' +
          ' make sure it looks right.\n\n' +
          'Outputs are: sample_name, analytes, data_dict, metadata_dict')

    return sample, analytes, data, meta
import math import os import shutil import subprocess import shlex import pandas as pd from cea import suppress_3rd_party_debug_loggers from cea.resources.radiation_daysim.geometry_generator import BuildingGeometry suppress_3rd_party_debug_loggers() import py4design.py2radiance as py2radiance from py4design.py3dmodel.fetch import points_frm_occface class CEADaySim(object): """ This class helps to initialize the Daysim folder structure in the `staging_path` and encapsulates all the methods required to create the initial input files for Daysim (i.e radiance material file, radiance geometry file and weather file) which are needed for Daysim projects. It also initializes Daysim projects with the help of the `DaySimProject` class The staging folder is split into 2 folders, 'common_inputs' and 'projects'. 'common_inputs' store the created input files which are shared among any running Daysim projects, 'projects' store the folders of any running Daysim projects This splitting allows Daysim projects to be run parallel with multiprocessing :param str staging_path: Path where to create Daysim Project :param str daysim_dir: Directory where Daysim binaries are found """ def __init__(self, staging_path, daysim_dir): self.common_inputs = os.path.join(staging_path, 'common_inputs') self.projects_dir = os.path.join(staging_path, 'projects') self.daysim_dir = daysim_dir self._create_folders() # Raw input files (radiance material and geometry) self.rad_material_path = os.path.join(self.common_inputs, 'radiance_material.rad') self.rad_geometry_path = os.path.join(self.common_inputs, 'radiance_geometry.rad') # Common generated input files self.daysim_material_path = os.path.join(self.common_inputs, 'daysim_material.rad') self.daysim_geometry_path = os.path.join(self.common_inputs, 'daysim_geometry.rad') self.wea_weather_path = os.path.join(self.common_inputs, 'weather_60min.wea') # Header Properties self.site_info = None def _create_folders(self): if not 
os.path.exists(self.common_inputs): os.makedirs(self.common_inputs) if not os.path.exists(self.projects_dir): os.makedirs(self.projects_dir) def initialize_daysim_project(self, project_name): """ Returns a DaySimProject object that initializes a Daysim project with the name `project_name` :param str project_name: Name of Daysim project :return DaySimProject: """ return DaySimProject(project_name, self.projects_dir, self.daysim_dir, self.daysim_material_path, self.daysim_geometry_path, self.wea_weather_path, self.site_info) def create_radiance_material(self, building_surface_properties): add_rad_mat(self.rad_material_path, building_surface_properties) def create_radiance_geometry(self, geometry_terrain, building_surface_properties, zone_building_names, surroundings_building_names, geometry_pickle_dir): create_rad_geometry(self.rad_geometry_path, geometry_terrain, building_surface_properties, zone_building_names, surroundings_building_names, geometry_pickle_dir) @staticmethod def run_cmd(cmd, cwd=None): print('Running command `{}`{}'.format(cmd, '' if cwd is None else ' in `{}`'.format(cwd))) try: # Stops script if commands fail (i.e non-zero exit code) subprocess.check_call(shlex.split(cmd), cwd=cwd, stderr=subprocess.STDOUT, env=os.environ) except TypeError as error: if error.message == "environment can only contain strings": for key in os.environ.keys(): value = os.environ[key] if not isinstance(value, str): print("Bad ENVIRON key: {key}={value} ({value_type})".format( key=key, value=value, value_type=type(value))) raise error @staticmethod def generate_project_header(project_name, project_directory, tmp_directory, daysim_bin_directory): return "project_name {project_name}\n" \ "project_directory {project_directory}\n" \ "bin_directory {bin_directory}\n" \ "tmp_directory {tmp_directory}\n".format(project_name=project_name, project_directory=project_directory, bin_directory=daysim_bin_directory, tmp_directory=tmp_directory) @staticmethod def 
generate_geometry_header(daysim_material_path, daysim_geometry_path): return "material_file {daysim_material_path}\n" \ "geometry_file {daysim_geometry_path}\n".format(daysim_material_path=daysim_material_path, daysim_geometry_path=daysim_geometry_path) @staticmethod def generate_site_info_header(site_info, wea_weather_path): return "{site_info}" \ "time_step 60\n" \ "wea_data_short_file {wea_weather_path}\n" \ "wea_data_short_file_units 1\n" \ "lower_direct_threshold 2\n" \ "lower_diffuse_threshold 2\n".format(site_info=site_info, wea_weather_path=wea_weather_path) def execute_epw2wea(self, epw_weather_path, ground_reflectance=0.2): command = 'epw2wea "{epw_weather_path}" "{wea_weather_path}"'.format(epw_weather_path=epw_weather_path, wea_weather_path=self.wea_weather_path) print(f'Running command `{command}`') # get site information from stdout of epw2wea epw2wea_result = subprocess.run(shlex.split(command), stdout=subprocess.PIPE) site_headers = epw2wea_result.stdout.decode('utf-8') self.site_info = "{epw2wea_output}\n" \ "ground_reflectance {ground_reflectance}\n".format( epw2wea_output="\n".join(site_headers.split("\r\n")), ground_reflectance=ground_reflectance) # Save info of original epw file weather_info_path = os.path.join(self.common_inputs, "weather_info.txt") with open(weather_info_path, "w") as weather_info: weather_info.write('# Original epw file: {epw_weather_path}'.format(epw_weather_path=epw_weather_path)) weather_info.write(self.site_info) def execute_radfiles2daysim(self): hea_path = os.path.join(self.common_inputs, "rad2daysim.hea") tmp_path = os.path.join(self.common_inputs, "tmp", "") project_header = self.generate_project_header("rad2daysim", self.common_inputs, tmp_path, self.daysim_dir) geometry_header = self.generate_geometry_header(os.path.basename(self.daysim_material_path), os.path.basename(self.daysim_geometry_path)) # create header file for radfiles2daysim with open(hea_path, "w") as hea_file: building_info = "{project_header}\n" \ 
"{geometry_header}\n" \ "radiance_source_files 2, {rad_material_path}, {rad_geometry_path}\n".format( project_header=project_header, geometry_header=geometry_header, rad_material_path=self.rad_material_path, rad_geometry_path=self.rad_geometry_path) hea_file.write(building_info) command1 = 'radfiles2daysim "{hea_path}" -g -m -d'.format(hea_path=hea_path) self.run_cmd(command1) class DaySimProject(object): def __init__(self, project_name, project_directory, daysim_bin_directory, daysim_material_path, daysim_geometry_path, wea_weather_path, site_info): # Project info self.project_name = project_name self.project_directory = project_directory # make sure folder paths have trailing slash (gen_dc will fail otherwise) self.project_path = os.path.join(project_directory, project_name, "") self.tmp_directory = os.path.join(self.project_path, "tmp", "") self.daysim_bin_directory = os.path.join(daysim_bin_directory, "") self._create_folders() # Input files self.daysim_material_path = daysim_material_path self.daysim_geometry_path = daysim_geometry_path self.wea_weather_path = wea_weather_path self.sensor_path = os.path.join(self.project_path, "sensors.pts") self.hea_path = os.path.join(self.project_path, "{project_name}.hea".format(project_name=project_name)) # Header Properties self.site_info = site_info self._create_project_header_file() def _create_folders(self): if not os.path.exists(self.project_path): os.makedirs(self.project_path) if not os.path.exists(self.tmp_directory): os.makedirs(self.tmp_directory) def _create_project_header_file(self): daysim_material_path = os.path.relpath(self.daysim_material_path, self.project_path) daysim_geometry_path = os.path.relpath(self.daysim_geometry_path, self.project_path) wea_weather_path = os.path.relpath(self.wea_weather_path, self.project_path) project_header = CEADaySim.generate_project_header(self.project_name, self.project_path, self.tmp_directory, self.daysim_bin_directory) geometry_header = 
CEADaySim.generate_geometry_header(daysim_material_path, daysim_geometry_path) site_info_header = CEADaySim.generate_site_info_header(self.site_info, wea_weather_path) with open(self.hea_path, "w") as hea_file: header = "{project_header}\n" \ "{site_info_header}\n" \ "{geometry_header}\n".format(project_header=project_header, geometry_header=geometry_header, site_info_header=site_info_header) hea_file.write(header) def cleanup_project(self): shutil.rmtree(self.project_path) def create_sensor_input_file(self, sensor_positions, sensor_normals, num_sensors, sensor_file_unit): """ Creates sensor input file and writes its location to the header file output_units <integrer n> n = 1 solar irradiance (W/m2) n = 2 illumiance (lux) :param sensor_positions: :param sensor_normals: :param str sensor_file_unit: the unit for all sensor points (w/m2 or lux) """ sensor_pts_data = py2radiance.write_rad.sensor_file(sensor_positions, sensor_normals) # create sensor file with open(self.sensor_path, "w") as sensor_file: sensor_file.write(sensor_pts_data) # add sensor file location to header file with open(self.hea_path, "a") as hea_file: # write the sensor file location into the .hea sensor_path = os.path.relpath(self.sensor_path, self.project_path) hea_file.write("sensor_file {sensor_path}\n".format(sensor_path=sensor_path)) # write unit for sensor points if sensor_file_unit == "w/m2": hea_file.write("output_units 1\n") if sensor_file_unit == "lux": hea_file.write("output_units 2\n") # Write senor_file_unit to header file # Fix to allow Daysim 5.2 binaries to work, not required for complied binaries from latest branch unit_num = "0" if sensor_file_unit == 'lux' else "2" # 0 = lux, 2 = w/m2 sensor_str = (unit_num + " ") * num_sensors hea_file.write("\nsensor_file_unit {sensor_str}\n".format(sensor_str=sensor_str)) def write_radiance_parameters(self, rad_ab, rad_ad, rad_as, rad_ar, rad_aa, rad_lr, rad_st, rad_sj, rad_lw, rad_dj, rad_ds, rad_dr, rad_dp): """ This function writes the 
radiance parameters for the Daysim simulation to the header file. :param int rad_ab: Number of ambient bounces. :param int rad_ad: Number of ambient divisions. :param int rad_as: Number of ambient super-samples. :param int rad_ar: Ambient resolution. :param float rad_aa: Ambient accuracy. :param int rad_lr: Maximum number of reflections. :param float rad_st: Specular sampling threshold. :param float rad_sj: Specular sampling jitter. :param float rad_lw: Minimum weight of each ray. :param float rad_dj: Direct jitter. :param float rad_ds: Direct sampling ration. :param int rad_dr: Number of relays from secondary sources. :param int rad_dp: Secondary source pre-sampling density. """ with open(self.hea_path, "a") as hea_file: radiance_parameters = "ab {rad_ab}\n" \ "ad {rad_ad}\n" \ "as {rad_as}\n" \ "ar {rad_ar}\n" \ "aa {rad_aa}\n" \ "lr {rad_lr}\n" \ "st {rad_st}\n" \ "sj {rad_sj}\n" \ "lw {rad_lw}\n" \ "dj {rad_dj}\n" \ "ds {rad_ds}\n" \ "dr {rad_dr}\n" \ "dp {rad_dp}\n".format(rad_ab=rad_ab, rad_ad=rad_ad, rad_as=rad_as, rad_ar=rad_ar, rad_aa=rad_aa, rad_lr=rad_lr, rad_st=rad_st, rad_sj=rad_sj, rad_lw=rad_lw, rad_dj=rad_dj, rad_ds=rad_ds, rad_dr=rad_dr, rad_dp=rad_dp) hea_file.write(radiance_parameters) def write_static_shading(self): """ This function writes the static shading into the header file. 
`shading 1 <descriptive_string> <file_name.dc> <file_name.ill>` The integer 1 represents static/no shading """ dc_file = "{file_name}.dc".format(file_name=self.project_name) ill_file = "{file_name}.ill".format(file_name=self.project_name) with open(self.hea_path, "a") as hea_file: static_shading = "shading 1 static_system {dc_file} {ill_file}\n".format(dc_file=dc_file, ill_file=ill_file) hea_file.write(static_shading) def execute_gen_dc(self): """ Calculates daylight coefficient files command parameters: -dir calculates direct daylight coefficients only -dif calculates diffuse daylight coefficients only -paste pastes direct and diffuse daylight coefficient output files into a single complete file """ # write the shading header self.write_static_shading() command1 = 'gen_dc "{hea_path}" -dir'.format(hea_path=self.hea_path) command2 = 'gen_dc "{hea_path}" -dif'.format(hea_path=self.hea_path) command3 = 'gen_dc "{hea_path}" -paste'.format(hea_path=self.hea_path) CEADaySim.run_cmd(command1) CEADaySim.run_cmd(command2) CEADaySim.run_cmd(command3) def execute_ds_illum(self): command1 = 'ds_illum "{hea_path}"'.format(hea_path=self.hea_path) CEADaySim.run_cmd(command1) def eval_ill(self): """ This function reads the output file from running `ds_illum`, parses the space separated values and returns the values as a numpy array. :return: Numpy array of hourly irradiance results of sensor points """ ill_path = os.path.join(self.project_path, "{file_name}.ill".format(file_name=self.project_name)) ill_result = pd.read_csv(ill_path, delimiter=' ', header=None).iloc[:, 4:].T.values return ill_result class RadSurface(object): """ An object that contains all the surface information running a Radiance/Daysim simulation. 
:param str name: Name of surface :param OCC.TopoDS.TopoDS_Face occ_face: Polygon (OCC TopoDS_Face) of surface :param str material: Name of material """ __slots__ = ['name', 'points', 'material'] def __init__(self, name, occ_face, material): self.name = name self.points = points_frm_occface(occ_face) self.material = material def rad(self): """ Returns surface information as Radiance string format. Format from Radiance manual: `modifier` `type` `identifier` `number of string arguments` `string arguments` `number of integer arguments` `integer arguments` `number of decimal arguments` `decimal arguments` In this case, `modifier` would be the name of the material `type` would be 'polygon' there will be no string or integer arguments decimal arguments would be points of vertices :returns str: The surface written into radiance readable string. """ num_of_points = len(self.points) * 3 points = "" for point in self.points: points = points + "{point_x} {point_y} {point_z}\n".format(point_x=point[0], point_y=point[1], point_z=point[2]) surface = "{material} polygon {name}\n" \ "0\n" \ "0\n" \ "{num_of_points}\n" \ "{points}\n".format(material=self.material, name=self.name, num_of_points=num_of_points, points=points) return surface def calc_transmissivity(G_value): """ Calculate window transmissivity from its transmittance using an empirical equation from Radiance. :param float G_value: Solar energy transmittance of windows (dimensionless) :return float: Transmissivity [RADIANCE, 2010] The Radiance 4.0 Synthetic Imaging System. Lawrence Berkeley National Laboratory. 
""" return (math.sqrt(0.8402528435 + 0.0072522239 * G_value * G_value) - 0.9166530661) / 0.0036261119 / G_value def add_rad_mat(daysim_mat_file, ageometry_table): file_path = daysim_mat_file roughness = 0.02 specularity = 0.03 with open(file_path, 'w') as write_file: # first write the material use for the terrain and surrounding buildings string = "void plastic reflectance0.2\n0\n0\n5 0.5360 0.1212 0.0565 0 0" write_file.writelines(string + '\n') written_mat_name_list = [] for geo in ageometry_table.index.values: mat_name = "wall_" + str(ageometry_table['type_wall'][geo]) if mat_name not in written_mat_name_list: mat_value1 = ageometry_table['r_wall'][geo] mat_value2 = mat_value1 mat_value3 = mat_value1 mat_value4 = specularity mat_value5 = roughness string = "void plastic " + mat_name + "\n0\n0\n5 " + str(mat_value1) + " " + str( mat_value2) + " " + str(mat_value3) \ + " " + str(mat_value4) + " " + str(mat_value5) write_file.writelines('\n' + string + '\n') written_mat_name_list.append(mat_name) mat_name = "win_" + str(ageometry_table['type_win'][geo]) if mat_name not in written_mat_name_list: mat_value1 = calc_transmissivity(ageometry_table['G_win'][geo]) mat_value2 = mat_value1 mat_value3 = mat_value1 string = "void glass " + mat_name + "\n0\n0\n3 " + str(mat_value1) + " " + str(mat_value2) + " " + str( mat_value3) write_file.writelines('\n' + string + '\n') written_mat_name_list.append(mat_name) mat_name = "roof_" + str(ageometry_table['type_roof'][geo]) if mat_name not in written_mat_name_list: mat_value1 = ageometry_table['r_roof'][geo] mat_value2 = mat_value1 mat_value3 = mat_value1 mat_value4 = specularity mat_value5 = roughness string = "void plastic " + mat_name + "\n0\n0\n5 " + str(mat_value1) + " " + str( mat_value2) + " " + str(mat_value3) \ + " " + str(mat_value4) + " " + str(mat_value5) write_file.writelines('\n' + string + '\n') written_mat_name_list.append(mat_name) write_file.close() def terrain_to_radiance(tin_occface_terrain): return 
[RadSurface("terrain_srf" + str(num), face, "reflectance0.2") for num, face in enumerate(tin_occface_terrain)] def zone_building_to_radiance(building_geometry, building_surface_properties): building_name = building_geometry.name building_surfaces = [] # windows for num, occ_face in enumerate(building_geometry.windows): surface_name = "win_{building_name}_{num}".format(building_name=building_name, num=num) material_name = "win_{material}".format(material=building_surface_properties['type_win'][building_name]) building_surfaces.append(RadSurface(surface_name, occ_face, material_name)) # walls for num, occ_face in enumerate(building_geometry.walls): surface_name = "wall_{building_name}_{num}".format(building_name=building_name, num=num) material_name = "wall_{material}".format(material=building_surface_properties['type_wall'][building_name]) building_surfaces.append(RadSurface(surface_name, occ_face, material_name)) # roofs for num, occ_face in enumerate(building_geometry.roofs): surface_name = "roof_{building_name}_{num}".format(building_name=building_name, num=num) material_name = "roof_{material}".format(material=building_surface_properties['type_roof'][building_name]) building_surfaces.append(RadSurface(surface_name, occ_face, material_name)) return building_surfaces def surrounding_building_to_radiance(building_geometry): building_name = building_geometry.name building_surfaces = [] # walls for num, occ_face in enumerate(building_geometry.walls): surface_name = "surrounding_buildings_wall_{building_name}_{num}".format(building_name=building_name, num=num) building_surfaces.append(RadSurface(surface_name, occ_face, "reflectance0.2")) # roofs for num, occ_face in enumerate(building_geometry.roofs): surface_name = "surrounding_buildings_roof_{building_name}_{num}".format(building_name=building_name, num=num) building_surfaces.append(RadSurface(surface_name, occ_face, "reflectance0.2")) return building_surfaces def create_rad_geometry(file_path, geometry_terrain, 
building_surface_properties, zone_building_names, surroundings_building_names, geometry_pickle_dir): out = [] for terrain_surface in terrain_to_radiance(geometry_terrain): out.append(terrain_surface.rad()) for building_name in zone_building_names: building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'zone', building_name)) for building_surface in zone_building_to_radiance(building_geometry, building_surface_properties): out.append(building_surface.rad()) for building_name in surroundings_building_names: building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'surroundings', building_name)) for building_surface in surrounding_building_to_radiance(building_geometry): out.append(building_surface.rad()) with open(file_path, "w") as rad_file: rad_file.write(''.join(out))
# firebird.py
# Copyright (C) 2005, 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

# NOTE(review): this is a legacy, Python 2-era SQLAlchemy (0.4.x) dialect for
# the Firebird database via the kinterbasdb DBAPI. It will not run unmodified
# on Python 3 (e.g. dict.iteritems below).

import datetime
import warnings

from sqlalchemy import util, schema, exceptions, pool
from sqlalchemy.sql import compiler
from sqlalchemy.engine import default, base
from sqlalchemy import types as sqltypes

# guards one-time kinterbasdb.init() in create_connect_args
_initialized_kb = False


# --- column type specifications mapping SQLAlchemy types to Firebird DDL ---

class FBNumeric(sqltypes.Numeric):
    def get_col_spec(self):
        if self.precision is None:
            return "NUMERIC"
        else:
            return "NUMERIC(%(precision)s, %(length)s)" % {'precision': self.precision, 'length' : self.length}

    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect):
        # convert Decimal results to float unless asdecimal was requested
        if self.asdecimal:
            return None
        else:
            def process(value):
                if isinstance(value, util.decimal_type):
                    return float(value)
                else:
                    return value
            return process


class FBFloat(sqltypes.Float):
    def get_col_spec(self):
        if not self.precision:
            return "FLOAT"
        else:
            return "FLOAT(%(precision)s)" % {'precision': self.precision}


class FBInteger(sqltypes.Integer):
    def get_col_spec(self):
        return "INTEGER"


class FBSmallInteger(sqltypes.Smallinteger):
    def get_col_spec(self):
        return "SMALLINT"


class FBDateTime(sqltypes.DateTime):
    def get_col_spec(self):
        return "TIMESTAMP"

    def bind_processor(self, dialect):
        # promote date objects to datetimes; kinterbasdb expects datetime for TIMESTAMP
        def process(value):
            if value is None or isinstance(value, datetime.datetime):
                return value
            else:
                return datetime.datetime(year=value.year,
                                         month=value.month,
                                         day=value.day)
        return process


class FBDate(sqltypes.DateTime):
    def get_col_spec(self):
        return "DATE"


class FBTime(sqltypes.Time):
    def get_col_spec(self):
        return "TIME"


class FBText(sqltypes.TEXT):
    def get_col_spec(self):
        # SUB_TYPE 1 = text blob
        return "BLOB SUB_TYPE 1"


class FBString(sqltypes.String):
    def get_col_spec(self):
        return "VARCHAR(%(length)s)" % {'length' : self.length}


class FBChar(sqltypes.CHAR):
    def get_col_spec(self):
        return "CHAR(%(length)s)" % {'length' : self.length}


class FBBinary(sqltypes.Binary):
    def get_col_spec(self):
        # SUB_TYPE 0 = binary blob
        return "BLOB SUB_TYPE 0"


class FBBoolean(sqltypes.Boolean):
    def get_col_spec(self):
        # Firebird (pre-3.0) has no BOOLEAN type; SMALLINT is used instead
        return "SMALLINT"


# generic SQLAlchemy type -> Firebird-specific type adaptation table
colspecs = {
    sqltypes.Integer : FBInteger,
    sqltypes.Smallinteger : FBSmallInteger,
    sqltypes.Numeric : FBNumeric,
    sqltypes.Float : FBFloat,
    sqltypes.DateTime : FBDateTime,
    sqltypes.Date : FBDate,
    sqltypes.Time : FBTime,
    sqltypes.String : FBString,
    sqltypes.Binary : FBBinary,
    sqltypes.Boolean : FBBoolean,
    sqltypes.TEXT : FBText,
    sqltypes.CHAR: FBChar,
}


def descriptor():
    # dialect metadata consumed by SQLAlchemy's engine URL machinery
    return {'name':'firebird',
    'description':'Firebird',
    'arguments':[
        ('host', 'Host Server Name', None),
        ('database', 'Database Name', None),
        ('user', 'Username', None),
        ('password', 'Password', None)
    ]}


class FBExecutionContext(default.DefaultExecutionContext):
    pass


class FBDialect(default.DefaultDialect):
    # Firebird's rowcount reporting is unreliable for SQLAlchemy's purposes
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    # Firebird identifiers are limited to 31 characters
    max_identifier_length = 31
    preexecute_sequences = True

    def __init__(self, type_conv=200, concurrency_level=1, **kwargs):
        # type_conv/concurrency_level are passed through to kinterbasdb.init()
        default.DefaultDialect.__init__(self, **kwargs)

        self.type_conv = type_conv
        self.concurrency_level = concurrency_level

    def dbapi(cls):
        import kinterbasdb
        return kinterbasdb
    dbapi = classmethod(dbapi)

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')
        if opts.get('port'):
            # kinterbasdb expects host/port combined as "host/port"
            opts['host'] = "%s/%s" % (opts['host'], opts['port'])
            del opts['port']
        opts.update(url.query)

        type_conv = opts.pop('type_conv', self.type_conv)
        concurrency_level = opts.pop('concurrency_level', self.concurrency_level)
        # kinterbasdb.init() may only be called once per process
        global _initialized_kb
        if not _initialized_kb and self.dbapi is not None:
            _initialized_kb = True
            self.dbapi.init(type_conv=type_conv, concurrency_level=concurrency_level)
        return ([], opts)

    def create_execution_context(self, *args, **kwargs):
        return FBExecutionContext(self, *args, **kwargs)

    def type_descriptor(self, typeobj):
        return sqltypes.adapt_type(typeobj, colspecs)

    def _normalize_name(self, name):
        # Remove trailing spaces: FB uses a CHAR() type,
        # that is padded with spaces
        name = name and name.rstrip()
        if name is None:
            return None
        elif name.upper() == name and not self.identifier_preparer._requires_quotes(name.lower()):
            # all-uppercase, unquoted names are case-insensitive in Firebird:
            # present them lowercased, SQLAlchemy's canonical form
            return name.lower()
        else:
            return name

    def _denormalize_name(self, name):
        # inverse of _normalize_name: lowercase canonical names go back to uppercase
        if name is None:
            return None
        elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
            return name.upper()
        else:
            return name

    def table_names(self, connection, schema):
        # RDB$SYSTEM_FLAG=0 excludes Firebird's own system tables
        s = "SELECT R.RDB$RELATION_NAME FROM RDB$RELATIONS R WHERE R.RDB$SYSTEM_FLAG=0"
        return [self._normalize_name(row[0]) for row in connection.execute(s)]

    def has_table(self, connection, table_name, schema=None):
        tblqry = """
        SELECT count(*)
             FROM RDB$RELATIONS R
        WHERE R.RDB$RELATION_NAME=?"""

        c = connection.execute(tblqry, [self._denormalize_name(table_name)])
        row = c.fetchone()
        if row[0] > 0:
            return True
        else:
            return False

    def is_disconnect(self, e):
        # detect dropped connections by kinterbasdb's error message text
        if isinstance(e, self.dbapi.OperationalError):
            return 'Unable to complete network request to host' in str(e)
        else:
            return False

    def reflecttable(self, connection, table, include_columns):
        """Populate `table` with columns, primary keys and foreign keys
        reflected from the Firebird system catalogs (RDB$ tables)."""
        #TODO: map these better
        # keys are Firebird RDB$FIELD_TYPE codes
        column_func = {
            14 : lambda r: sqltypes.String(r['FLEN']), # TEXT
            7  : lambda r: sqltypes.Integer(), # SHORT
            8  : lambda r: r['FPREC']==0 and sqltypes.Integer() or sqltypes.Numeric(precision=r['FPREC'], length=r['FSCALE'] * -1), #INT or NUMERIC
            9  : lambda r: sqltypes.Float(), # QUAD
            10 : lambda r: sqltypes.Float(), # FLOAT
            27 : lambda r: sqltypes.Float(), # DOUBLE
            35 : lambda r: sqltypes.DateTime(), # TIMESTAMP
            37 : lambda r: sqltypes.String(r['FLEN']), # VARYING
            261: lambda r: sqltypes.TEXT(), # BLOB
            40 : lambda r: sqltypes.Char(r['FLEN']), # CSTRING
            12 : lambda r: sqltypes.Date(), # DATE
            13 : lambda r: sqltypes.Time(), # TIME
            16 : lambda r: sqltypes.Numeric(precision=r['FPREC'], length=r['FSCALE'] * -1)  #INT64
            }
        tblqry = """
        SELECT DISTINCT R.RDB$FIELD_NAME AS FNAME,
                  R.RDB$NULL_FLAG AS NULL_FLAG,
                  R.RDB$FIELD_POSITION,
                  F.RDB$FIELD_TYPE AS FTYPE,
                  F.RDB$FIELD_SUB_TYPE AS STYPE,
                  F.RDB$FIELD_LENGTH AS FLEN,
                  F.RDB$FIELD_PRECISION AS FPREC,
                  F.RDB$FIELD_SCALE AS FSCALE
        FROM RDB$RELATION_FIELDS R
             JOIN RDB$FIELDS F ON R.RDB$FIELD_SOURCE=F.RDB$FIELD_NAME
        WHERE F.RDB$SYSTEM_FLAG=0 and R.RDB$RELATION_NAME=?
        ORDER BY R.RDB$FIELD_POSITION"""
        keyqry = """
        SELECT SE.RDB$FIELD_NAME SENAME
        FROM RDB$RELATION_CONSTRAINTS RC
             JOIN RDB$INDEX_SEGMENTS SE ON RC.RDB$INDEX_NAME=SE.RDB$INDEX_NAME
        WHERE RC.RDB$CONSTRAINT_TYPE=? AND RC.RDB$RELATION_NAME=?"""
        fkqry = """
        SELECT RC.RDB$CONSTRAINT_NAME CNAME,
               CSE.RDB$FIELD_NAME FNAME,
               IX2.RDB$RELATION_NAME RNAME,
               SE.RDB$FIELD_NAME SENAME
        FROM RDB$RELATION_CONSTRAINTS RC
             JOIN RDB$INDICES IX1 ON IX1.RDB$INDEX_NAME=RC.RDB$INDEX_NAME
             JOIN RDB$INDICES IX2 ON IX2.RDB$INDEX_NAME=IX1.RDB$FOREIGN_KEY
             JOIN RDB$INDEX_SEGMENTS CSE ON CSE.RDB$INDEX_NAME=IX1.RDB$INDEX_NAME
             JOIN RDB$INDEX_SEGMENTS SE ON SE.RDB$INDEX_NAME=IX2.RDB$INDEX_NAME AND SE.RDB$FIELD_POSITION=CSE.RDB$FIELD_POSITION
        WHERE RC.RDB$CONSTRAINT_TYPE=? AND RC.RDB$RELATION_NAME=?
        ORDER BY SE.RDB$INDEX_NAME, SE.RDB$FIELD_POSITION"""

        # get primary key fields
        c = connection.execute(keyqry, ["PRIMARY KEY", self._denormalize_name(table.name)])
        pkfields =[self._normalize_name(r['SENAME']) for r in c.fetchall()]

        # get all of the fields for this table
        c = connection.execute(tblqry, [self._denormalize_name(table.name)])

        found_table = False
        while True:
            row = c.fetchone()
            if row is None:
                break
            found_table = True

            name = self._normalize_name(row['FNAME'])
            if include_columns and name not in include_columns:
                continue
            args = [name]

            kw = {}
            # get the data types and lengths
            coltype = column_func.get(row['FTYPE'], None)
            if coltype is None:
                warnings.warn(RuntimeWarning("Did not recognize type '%s' of column '%s'" % (str(row['FTYPE']), name)))
                coltype = sqltypes.NULLTYPE
            else:
                coltype = coltype(row)

            args.append(coltype)

            # is it a primary key?
            kw['primary_key'] = name in pkfields

            # is it nullable ?
            kw['nullable'] = not bool(row['NULL_FLAG'])

            table.append_column(schema.Column(*args, **kw))

        if not found_table:
            raise exceptions.NoSuchTableError(table.name)

        # get the foreign keys
        c = connection.execute(fkqry, ["FOREIGN KEY", self._denormalize_name(table.name)])
        fks = {}
        while True:
            row = c.fetchone()
            if not row:
                break

            # group constrained/referred column pairs by constraint name
            cname = self._normalize_name(row['CNAME'])
            try:
                fk = fks[cname]
            except KeyError:
                fks[cname] = fk = ([], [])
            rname = self._normalize_name(row['RNAME'])
            # reflect the referred table so the ForeignKeyConstraint can resolve
            schema.Table(rname, table.metadata, autoload=True, autoload_with=connection)
            fname = self._normalize_name(row['FNAME'])
            refspec = rname + '.' + self._normalize_name(row['SENAME'])
            fk[0].append(fname)
            fk[1].append(refspec)

        # NOTE(review): dict.iteritems() is Python 2 only - this module predates Python 3
        for name,value in fks.iteritems():
            table.append_constraint(schema.ForeignKeyConstraint(value[0], value[1], name=name))

    def do_execute(self, cursor, statement, parameters, **kwargs):
        # kinterbasdb rejects None for parameters; substitute an empty list
        cursor.execute(statement, parameters or [])

    def do_rollback(self, connection):
        # retaining=True: keep the transaction context open after rollback
        connection.rollback(True)

    def do_commit(self, connection):
        # retaining=True: keep the transaction context open after commit
        connection.commit(True)


class FBCompiler(compiler.DefaultCompiler):
    """Firebird specific idiosincrasies"""

    def visit_alias(self, alias, asfrom=False, **kwargs):
        # Override to not use the AS keyword which FB 1.5 does not like
        if asfrom:
            return self.process(alias.original, asfrom=True, **kwargs) + " " + self.preparer.format_alias(alias, self._anonymize(alias.name))
        else:
            return self.process(alias.original, **kwargs)

    def visit_function(self, func):
        # zero-argument functions are rendered without parentheses in Firebird
        if func.clauses:
            return super(FBCompiler, self).visit_function(func)
        else:
            return func.name

    def default_from(self):
        # Firebird requires a FROM clause; rdb$database is the standard dummy table
        return " FROM rdb$database"

    def visit_sequence(self, seq):
        return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)

    def get_select_precolumns(self, select):
        """Called when building a ``SELECT`` statement, position is just before column list Firebird puts the limit and offset right after the ``SELECT``...
""" result = "" if select._limit: result += " FIRST %d " % select._limit if select._offset: result +=" SKIP %d " % select._offset if select._distinct: result += " DISTINCT " return result def limit_clause(self, select): """Already taken care of in the `get_select_precolumns` method.""" return "" class FBSchemaGenerator(compiler.SchemaGenerator): def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) colspec += " " + column.type.dialect_impl(self.dialect).get_col_spec() default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable or column.primary_key: colspec += " NOT NULL" return colspec def visit_sequence(self, sequence): self.append("CREATE GENERATOR %s" % self.preparer.format_sequence(sequence)) self.execute() class FBSchemaDropper(compiler.SchemaDropper): def visit_sequence(self, sequence): self.append("DROP GENERATOR %s" % self.preparer.format_sequence(sequence)) self.execute() class FBDefaultRunner(base.DefaultRunner): def visit_sequence(self, seq): return self.execute_string("SELECT gen_id(%s, 1) FROM rdb$database" % \ self.dialect.identifier_preparer.format_sequence(seq)) RESERVED_WORDS = util.Set( ["action", "active", "add", "admin", "after", "all", "alter", "and", "any", "as", "asc", "ascending", "at", "auto", "autoddl", "avg", "based", "basename", "base_name", "before", "begin", "between", "bigint", "blob", "blobedit", "buffer", "by", "cache", "cascade", "case", "cast", "char", "character", "character_length", "char_length", "check", "check_point_len", "check_point_length", "close", "collate", "collation", "column", "commit", "committed", "compiletime", "computed", "conditional", "connect", "constraint", "containing", "continue", "count", "create", "cstring", "current", "current_connection", "current_date", "current_role", "current_time", "current_timestamp", "current_transaction", "current_user", "cursor", "database", "date", "day", 
"db_key", "debug", "dec", "decimal", "declare", "default", "delete", "desc", "descending", "describe", "descriptor", "disconnect", "display", "distinct", "do", "domain", "double", "drop", "echo", "edit", "else", "end", "entry_point", "escape", "event", "exception", "execute", "exists", "exit", "extern", "external", "extract", "fetch", "file", "filter", "float", "for", "foreign", "found", "free_it", "from", "full", "function", "gdscode", "generator", "gen_id", "global", "goto", "grant", "group", "group_commit_", "group_commit_wait", "having", "help", "hour", "if", "immediate", "in", "inactive", "index", "indicator", "init", "inner", "input", "input_type", "insert", "int", "integer", "into", "is", "isolation", "isql", "join", "key", "lc_messages", "lc_type", "left", "length", "lev", "level", "like", "logfile", "log_buffer_size", "log_buf_size", "long", "manual", "max", "maximum", "maximum_segment", "max_segment", "merge", "message", "min", "minimum", "minute", "module_name", "month", "names", "national", "natural", "nchar", "no", "noauto", "not", "null", "numeric", "num_log_buffers", "num_log_bufs", "octet_length", "of", "on", "only", "open", "option", "or", "order", "outer", "output", "output_type", "overflow", "page", "pagelength", "pages", "page_size", "parameter", "password", "plan", "position", "post_event", "precision", "prepare", "primary", "privileges", "procedure", "protected", "public", "quit", "raw_partitions", "rdb$db_key", "read", "real", "record_version", "recreate", "references", "release", "release", "reserv", "reserving", "restrict", "retain", "return", "returning_values", "returns", "revoke", "right", "role", "rollback", "row_count", "runtime", "savepoint", "schema", "second", "segment", "select", "set", "shadow", "shared", "shell", "show", "singular", "size", "smallint", "snapshot", "some", "sort", "sqlcode", "sqlerror", "sqlwarning", "stability", "starting", "starts", "statement", "static", "statistics", "sub_type", "sum", "suspend", "table", 
"terminator", "then", "time", "timestamp", "to", "transaction", "translate", "translation", "trigger", "trim", "type", "uncommitted", "union", "unique", "update", "upper", "user", "using", "value", "values", "varchar", "variable", "varying", "version", "view", "wait", "wait_time", "weekday", "when", "whenever", "where", "while", "with", "work", "write", "year", "yearday" ]) class FBIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def __init__(self, dialect): super(FBIdentifierPreparer,self).__init__(dialect, omit_schema=True) dialect = FBDialect dialect.poolclass = pool.SingletonThreadPool dialect.statement_compiler = FBCompiler dialect.schemagenerator = FBSchemaGenerator dialect.schemadropper = FBSchemaDropper dialect.defaultrunner = FBDefaultRunner dialect.preparer = FBIdentifierPreparer