"""
Analysis tools for Topographica, other than plotting tools.

Configures the interface to the featuremapper and holoviews projects
and sets the appropriate Topographica-specific hooks.
"""

import numpy as np

from holoviews.interface.collector import Reference
from holoviews import HSV, Image
from holoviews.core.options import Compositor
from holoviews.ipython import IPTestCase
from holoviews.operation import chain, operation, factory, image_overlay

import imagen.colorspaces

from featuremapper.command import Collector, measure_response

import topo
from topo.analysis.featureresponses import FeatureResponses, FeatureCurves,\
    FeatureMaps, ReverseCorrelation, MeasureResponseCommand, pattern_response,\
    topo_metadata_fn, StorageHook, get_feature_preference
from topo.base.projection import Projection
from topo.base.sheet import Sheet
from topo.base.sheetview import CFView
from topo.misc.ipython import RunProgress
from topo.misc import color

from command import measure_cog

CoG_spec = "Image.X CoG * Image.Y CoG * Image.BlueChannel"
XYCoG = chain.instance(group='XYCoG', name='XYCoG',
                       operations=[image_overlay.instance(spec=CoG_spec),
                                   factory.instance()])
Compositor.register(Compositor("Image.X CoG * Image.Y CoG", XYCoG, 'XYCoG', 'display'))


import param
from holoviews import RGB, ElementOperation
from holoviews.operation.normalization import raster_normalization


class colorizeHSV(ElementOperation):
    """
    Given an Overlay consisting of two Image elements, colorize the
    data in the bottom Image with the data in the top Image using
    the HSV color space.
    """

    group = param.String(default='ColorizedHSV', doc="""
        The group string for the colorized output (an RGB element)""")

    output_type = RGB

    def _process(self, overlay, key=None):
        if len(overlay) != 2:
            raise Exception("colorizeHSV requires an overlay of two Image elements as input.")
        if (len(overlay.get(0).vdims), len(overlay.get(1).vdims)) != (1, 1):
            raise Exception("Each Image element must have a single value dimension.")
        if overlay.get(0).shape != overlay.get(1).shape:
            raise Exception("Mismatch in the shapes of the data in the Image elements.")

        hue = overlay.get(1)
        Hdim = hue.vdims[0]
        H = hue.clone(hue.data.copy(),
                      vdims=[Hdim(cyclic=True, range=hue.range(Hdim.name))])

        normfn = raster_normalization.instance()
        if self.p.input_ranges:
            S = normfn.process_element(overlay.get(0), key, *self.p.input_ranges)
        else:
            S = normfn.process_element(overlay.get(0), key)

        C = Image(np.ones(hue.data.shape),
                  bounds=self.get_overlay_bounds(overlay), group='F', label='G')

        C.vdims[0].range = (0, 1)
        S.vdims[0].range = (0, 1)
        return HSV(H * C * S).relabel(group=self.p.group)


Compositor.register(
    Compositor('CFView.CF Weight * Image.Orientation_Preference',
               colorizeHSV, 'ColorizedWeights', mode='display'))


class TopoIPTestCase(IPTestCase):

    def __init__(self, *args, **kwargs):
        super(TopoIPTestCase, self).__init__(*args, **kwargs)

    @classmethod
    def register(cls):
        super(TopoIPTestCase, cls).register()
        cls.equality_type_funcs[CFView] = cls.compare_cfview
        return cls.equality_type_funcs

    @classmethod
    def compare_cfview(cls, el1, el2, msg='CFView data'):
        cls.compare_image(el1, el2, msg=msg)


class SimRef(Reference):
    """
    A SimRef instance is installed on Collector to allow Topographica
    model elements to be referenced for collection.

    This is important to allow pickling and unpickling of Collectors
    that work correctly with Topographica in different execution
    environments (e.g. nodes of a cluster) and across different models
    without directly pickling the components (e.g. Sheets and
    Projections) themselves.

    More information about references can be found in the docstring of
    holoviews.collector.Reference.
    """

    @property
    def resolved_type(self):
        if self.array_ref:
            return np.ndarray
        elif isinstance(self.obj, tuple):
            return Projection
        else:
            return Sheet

    def __init__(self, obj=None, array_ref=None):
        if topo.sim.model is not None:
            print "DEPRECATION WARNING: use topo.submodel.specifications instead of SimRef."
        if [obj, array_ref] == [None, None]:
            raise Exception("Please specify an object, a path string or an array_ref.")

        self.array_ref = None
        if obj is None:
            self.obj = None
            self.array_ref = array_ref
        elif isinstance(obj, str):
            self.obj = tuple(obj.split('.')) if '.' in obj else obj
        elif isinstance(obj, Projection):
            self.obj = (obj.dest.name, obj.name)
        else:
            self.obj = obj.name

    def resolve(self):
        from topo import sim
        if isinstance(self.obj, tuple):
            (sheet, proj) = self.obj
            return sim[sheet].projections()[proj]
        elif self.obj:
            return sim[self.obj]
        else:
            return eval('topo.sim.' + self.array_ref)

    def __repr__(self):
        if isinstance(self.obj, tuple):
            return "SimRef(%r)" % '.'.join(el for el in self.obj)
        elif self.obj is None:
            return "SimRef(array_ref=%r)" % self.array_ref
        else:
            return "SimRef(%r)" % self.obj

    def __str__(self):
        if isinstance(self.obj, tuple):
            return "topo.sim." + '.'.join(el for el in self.obj)
        elif self.obj is None:
            return "topo.sim." + self.array_ref
        else:
            return "topo.sim." + self.obj


### Collection hooks

Collector.time_fn = topo.sim.time
Collector.interval_hook = RunProgress


def sheet_hook(obj, *args, **kwargs):
    """
    Return an Image of the Sheet activity.
    """
    return obj[:]


def projection_hook(obj, *args, **kwargs):
    """
    Return an Image of the projection activity, or a Grid of the CFs
    if grid=True.
    """
    if kwargs.pop('grid', False):
        return obj.grid(*args, **kwargs)
    else:
        return obj.projection_view()


def measurement_hook(obj, *args, **kwargs):
    return obj(*args, **kwargs)


# Configure Collector with appropriate hooks
Collector.sim = SimRef
Collector.for_type(Sheet, sheet_hook, referencer=SimRef)
Collector.for_type(Projection, projection_hook, referencer=SimRef)
Collector.for_type(measure_cog, measurement_hook, mode='merge')

# Setting default channel operation for ON-OFF visualization
op_subtract = operation.instance(output_type=CFView,
                                 op=lambda x, k: x.collapse(np.subtract))
Compositor.register(Compositor('CFView.CF_Weight * CFView.CF_Weight',
                               op_subtract, 'OnOff CFs', mode='data'))


# Featuremapper hooks

def empty_storage_hook(arg):
    """
    Use this to unset the storage hook, because a lambda will not work
    with snapshots. This function is used in notebook_setup.py of the
    topographica IPython profile.
    """
    pass


FeatureResponses.metadata_fns = [topo_metadata_fn]
FeatureResponses.pattern_response_fn = pattern_response.instance()
FeatureMaps.measurement_storage_hook = StorageHook.instance(sublabel='Maps')
FeatureCurves.measurement_storage_hook = StorageHook.instance(sublabel='Curves')
ReverseCorrelation.measurement_storage_hook = StorageHook.instance(sublabel='RFs')
measure_response.measurement_storage_hook = StorageHook.instance(sublabel=None)
measure_cog.measurement_storage_hook = StorageHook.instance(sublabel='CoG')

MeasureResponseCommand.preference_lookup_fn = get_feature_preference
MeasureResponseCommand.pattern_response_fn = pattern_response.instance()

## Set optimized versions of color conversion functions
imagen.colorspaces.rgb_to_hsv = color._rgb_to_hsv_array_opt
imagen.colorspaces.hsv_to_rgb = color._hsv_to_rgb_array_opt

# Automatically discover all .py files in this directory.
import os, fnmatch
__all__ = [f.split('.py')[0] for f in os.listdir(__path__[0])
           if fnmatch.fnmatch(f, '[!._]*.py')]
del f, os, fnmatch
# Copyright (c) 2015 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log from ovs import vlog from dragonflow.controller.common import constants from dragonflow.db.models import qos from dragonflow.db.models import switch from dragonflow.ovsdb import impl_idl LOG = log.getLogger(__name__) OFPORT_RANGE_MIN = 1 OFPORT_RANGE_MAX = 65533 OVS_LOG_FILE_NAME = 'df-ovs.log' class OvsApi(object): """The interface of openvswitch Consumers use this class to set openvswitch or get results from openvswitch. """ def __init__(self, ip, protocol='tcp', port='6640', timeout=10): super(OvsApi, self).__init__() self.ip = ip self.protocol = protocol self.port = port # NOTE: This has to be this name vsctl_timeout, as neutron will use # this attribute to set the timeout of ovs db. self.vsctl_timeout = timeout self.ovsdb = None self.integration_bridge = cfg.CONF.df.integration_bridge if cfg.CONF.log_dir: vlog.Vlog.init(cfg.CONF.log_dir + '/' + OVS_LOG_FILE_NAME) else: vlog.Vlog.init() def initialize(self, db_change_callback): db_connection = ('%s:%s:%s' % (self.protocol, self.ip, self.port)) db_change_callback(None, None, constants.CONTROLLER_SWITCH_SYNC_STARTED, None) self.ovsdb = impl_idl.DFOvsdbApi( db_connection, self.vsctl_timeout, db_change_callback) db_change_callback(None, None, constants.CONTROLLER_SWITCH_SYNC_FINISHED, None) def _db_get_val(self, table, record, column, check_error=False, log_errors=True): return self.ovsdb.db_get(table, record, column).execute( check_error=check_error, log_errors=log_errors) def _get_bridge_for_iface(self, iface_name): return self.ovsdb.iface_to_br(iface_name).execute() def set_controller(self, bridge, targets): self.ovsdb.set_controller(bridge, targets).execute() def set_controller_fail_mode(self, bridge, fail_mode): self.ovsdb.set_fail_mode(bridge, fail_mode).execute() def check_controller(self, target): controllers = self.ovsdb.get_controller( self.integration_bridge).execute() return target in controllers def check_controller_fail_mode(self, fail_mode): return fail_mode == self._db_get_val('Bridge', self.integration_bridge, 'fail_mode') def get_virtual_tunnel_ports(self): ifaces = self.ovsdb.db_find( 'Interface', ('options', '=', {'remote_ip': 'flow'}), columns=['uuid', 'name', 'type']).execute() tunnel_ports = [] for iface in ifaces: if (self.integration_bridge != self._get_bridge_for_iface(iface['name'])): continue tunnel_ports.append( switch.SwitchPort( id=str(iface['uuid']), name=iface['name'], tunnel_type=iface['type'], ), ) return tunnel_ports def add_virtual_tunnel_port(self, tunnel_type, local_ip=None): self.ovsdb.add_virtual_tunnel_port(tunnel_type, local_ip).execute() def delete_port(self, switch_port): self.ovsdb.del_port(switch_port.name, self.integration_bridge).execute() @staticmethod def _check_ofport(port_name, port_num): if port_num is None: LOG.warning("Can't find port_num for port %s.", port_name) return False if port_num < OFPORT_RANGE_MIN or port_num 
> OFPORT_RANGE_MAX: LOG.warning("port_num %(port_num)s for port %(port)s is invalid.", {'port_num': port_num, 'port': port_name}) return False return True def get_interface_by_id_with_specified_columns(self, port_id, specified_columns): columns = {'external_ids', 'name'} columns.update(specified_columns) ifaces = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), columns=columns).execute() for iface in ifaces: if (self.integration_bridge != self._get_bridge_for_iface(iface['name'])): # iface-id is the port id in neutron, the same neutron port # might create multiple interfaces in different bridges continue return iface def get_port_ofport_by_id(self, port_id): iface = self.get_interface_by_id_with_specified_columns( port_id, {'name', 'ofport'}) if iface and self._check_ofport(iface['name'], iface['ofport']): return iface['ofport'] def get_local_port_mac_in_use(self, port_id): iface = self.get_interface_by_id_with_specified_columns( port_id, {'mac_in_use'}) if iface and netaddr.valid_mac(iface['mac_in_use']): return iface['mac_in_use'] def _get_port_name_by_id(self, port_id): ifaces = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), columns=['external_ids', 'name']).execute() for iface in ifaces: if (self.integration_bridge != self._get_bridge_for_iface(iface['name'])): # iface-id is the port id in neutron, the same neutron port # might create multiple interfaces in different bridges continue return iface['name'] def _gen_link_mapping(self, bridge1, bridge2, bridge1_link_name=None, bridge2_link_name=None): if bridge1_link_name is None: bridge1_link_name = "%s-patch" % bridge2 if bridge2_link_name is None: bridge2_link_name = "%s-patch" % bridge1 LOG.debug('genrated mappings {%(bridge1)s: %(link1)s,' ' %(bridge2)s: %(link2)s}', {'bridge1': bridge1, 'link1': bridge1_link_name, 'bridge2': bridge2, 'link2': bridge2_link_name}) return (bridge1_link_name, bridge2_link_name) def map_patch_to_network(self, network, patch_name): self.bridge_mapping[network] = patch_name def get_phy_network_ofport(self, network): patch_name = self.bridge_mapping.get(network) if patch_name: return self.get_port_ofport(patch_name) def create_patch_pair(self, local_bridge, peer_bridge, local_link_name=None, peer_link_name=None): links = self._gen_link_mapping( local_bridge, peer_bridge, local_link_name, peer_link_name) self._create_patch_port( local_bridge, links[0], links[1]) self._create_patch_port( peer_bridge, links[1], links[0]) return links def _create_patch_port(self, bridge, port, peer_port): if cfg.CONF.df.enable_dpdk: self.ovsdb.add_br(bridge, datapath_type='netdev').execute() else: self.ovsdb.add_br(bridge, datapath_type='system').execute() if not self.patch_port_exist(port): self.ovsdb.add_patch_port(bridge, port, peer_port).execute() def patch_port_exist(self, port): return 'patch' == self._db_get_val('Interface', port, 'type', check_error=False, log_errors=False) def get_port_ofport(self, port): return self._db_get_val('Interface', port, 'ofport', check_error=False, log_errors=False) def get_port_mac_in_use(self, port): return self._db_get_val('Interface', port, 'mac_in_use', check_error=False, log_errors=False) def get_port_qos(self, port_id): port_qoses = self.ovsdb.db_find( 'QoS', ('external_ids', '=', {'iface-id': port_id}), columns=['external_ids', '_uuid']).execute() if port_qoses: ovsdb_qos = port_qoses[0] external_ids = ovsdb_qos['external_ids'] return qos.QosPolicy( id=external_ids.get('qos-id'), topic=external_ids.get('qos-topic'), 
version=external_ids.get('version'), ) def set_port_qos(self, port_id, qos): port_name = self._get_port_name_by_id(port_id) if not port_name: return max_kbps = qos.get_max_kbps() max_burst_kbps = qos.get_max_burst_kbps() with self.ovsdb.transaction(check_error=True) as txn: qos_uuid = txn.add(self.ovsdb.create_qos(port_id, qos)) txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', max_kbps), ('ingress_policing_burst', max_burst_kbps))) txn.add(self.ovsdb.db_set('Port', port_name, ('qos', qos_uuid))) def update_port_qos(self, port_id, qos): port_name = self._get_port_name_by_id(port_id) if not port_name: return max_kbps = qos.get_max_kbps() max_burst_kbps = qos.get_max_burst_kbps() with self.ovsdb.transaction(check_error=True) as txn: txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', max_kbps), ('ingress_policing_burst', max_burst_kbps))) txn.add(self.ovsdb.update_qos(port_id, qos)) def clear_port_qos(self, port_id): port_name = self._get_port_name_by_id(port_id) if not port_name: return with self.ovsdb.transaction(check_error=True) as txn: txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', 0), ('ingress_policing_burst', 0))) txn.add(self.ovsdb.db_set('Port', port_name, ('qos', []))) txn.add(self.ovsdb.delete_qos(port_id)) def delete_port_qos_and_queue(self, port_id): self.ovsdb.delete_qos(port_id).execute() def get_vtp_ofport(self, tunnel_type): return self.get_port_ofport(tunnel_type + '-vtp')
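
# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Shows how OvsApi might be wired up. The address, port, controller
# target and the no-op callback below are assumptions for demonstration; in
# Dragonflow the callback and configuration come from the controller and
# oslo.config respectively.
def _example_ovs_api_usage():
    def _noop_callback(table, key, action, value):
        pass

    ovs = OvsApi(ip='127.0.0.1', protocol='tcp', port='6640', timeout=10)
    ovs.initialize(_noop_callback)
    # Point the integration bridge at the controller unless already configured.
    target = 'tcp:127.0.0.1:6633'
    if not ovs.check_controller(target):
        ovs.set_controller(ovs.integration_bridge, [target])
    return ovs.get_virtual_tunnel_ports()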
from __future__ import unicode_literals import datetime import unittest import warnings from decimal import Decimal import pytest from django.conf.urls import url from django.core.exceptions import ImproperlyConfigured from django.db import models from django.test import TestCase from django.test.utils import override_settings from django.utils.dateparse import parse_date from django.utils.six.moves import reload_module from rest_framework import filters, generics, serializers, status from rest_framework.compat import django_filters, reverse from rest_framework.test import APIRequestFactory from .models import BaseFilterableItem, BasicModel, FilterableItem factory = APIRequestFactory() if django_filters: class FilterableItemSerializer(serializers.ModelSerializer): class Meta: model = FilterableItem fields = '__all__' # Basic filter on a list view. class FilterFieldsRootView(generics.ListCreateAPIView): queryset = FilterableItem.objects.all() serializer_class = FilterableItemSerializer filter_fields = ['decimal', 'date'] filter_backends = (filters.DjangoFilterBackend,) # These class are used to test a filter class. class SeveralFieldsFilter(django_filters.FilterSet): text = django_filters.CharFilter(lookup_expr='icontains') decimal = django_filters.NumberFilter(lookup_expr='lt') date = django_filters.DateFilter(lookup_expr='gt') class Meta: model = FilterableItem fields = ['text', 'decimal', 'date'] class FilterClassRootView(generics.ListCreateAPIView): queryset = FilterableItem.objects.all() serializer_class = FilterableItemSerializer filter_class = SeveralFieldsFilter filter_backends = (filters.DjangoFilterBackend,) # These classes are used to test a misconfigured filter class. class MisconfiguredFilter(django_filters.FilterSet): text = django_filters.CharFilter(lookup_expr='icontains') class Meta: model = BasicModel fields = ['text'] class IncorrectlyConfiguredRootView(generics.ListCreateAPIView): queryset = FilterableItem.objects.all() serializer_class = FilterableItemSerializer filter_class = MisconfiguredFilter filter_backends = (filters.DjangoFilterBackend,) class FilterClassDetailView(generics.RetrieveAPIView): queryset = FilterableItem.objects.all() serializer_class = FilterableItemSerializer filter_class = SeveralFieldsFilter filter_backends = (filters.DjangoFilterBackend,) # These classes are used to test base model filter support class BaseFilterableItemFilter(django_filters.FilterSet): text = django_filters.CharFilter() class Meta: model = BaseFilterableItem fields = '__all__' # Test the same filter using the deprecated internal FilterSet class. 
class BaseFilterableItemFilterWithProxy(filters.FilterSet): text = django_filters.CharFilter() class Meta: model = BaseFilterableItem fields = '__all__' class BaseFilterableItemFilterRootView(generics.ListCreateAPIView): queryset = FilterableItem.objects.all() serializer_class = FilterableItemSerializer filter_class = BaseFilterableItemFilter filter_backends = (filters.DjangoFilterBackend,) class BaseFilterableItemFilterWithProxyRootView(BaseFilterableItemFilterRootView): filter_class = BaseFilterableItemFilterWithProxy # Regression test for #814 class FilterFieldsQuerysetView(generics.ListCreateAPIView): queryset = FilterableItem.objects.all() serializer_class = FilterableItemSerializer filter_fields = ['decimal', 'date'] filter_backends = (filters.DjangoFilterBackend,) class GetQuerysetView(generics.ListCreateAPIView): serializer_class = FilterableItemSerializer filter_class = SeveralFieldsFilter filter_backends = (filters.DjangoFilterBackend,) def get_queryset(self): return FilterableItem.objects.all() urlpatterns = [ url(r'^(?P<pk>\d+)/$', FilterClassDetailView.as_view(), name='detail-view'), url(r'^$', FilterClassRootView.as_view(), name='root-view'), url(r'^get-queryset/$', GetQuerysetView.as_view(), name='get-queryset-view'), ] class BaseFilterTests(TestCase): def setUp(self): self.original_coreapi = filters.coreapi filters.coreapi = True # mock it, because not None value needed self.filter_backend = filters.BaseFilterBackend() def tearDown(self): filters.coreapi = self.original_coreapi def test_filter_queryset_raises_error(self): with pytest.raises(NotImplementedError): self.filter_backend.filter_queryset(None, None, None) def test_get_schema_fields_checks_for_coreapi(self): filters.coreapi = None with pytest.raises(AssertionError): self.filter_backend.get_schema_fields({}) filters.coreapi = True assert self.filter_backend.get_schema_fields({}) == [] class CommonFilteringTestCase(TestCase): def _serialize_object(self, obj): return {'id': obj.id, 'text': obj.text, 'decimal': str(obj.decimal), 'date': obj.date.isoformat()} def setUp(self): """ Create 10 FilterableItem instances. """ base_data = ('a', Decimal('0.25'), datetime.date(2012, 10, 8)) for i in range(10): text = chr(i + ord(base_data[0])) * 3 # Produces string 'aaa', 'bbb', etc. decimal = base_data[1] + i date = base_data[2] - datetime.timedelta(days=i * 2) FilterableItem(text=text, decimal=decimal, date=date).save() self.objects = FilterableItem.objects self.data = [ self._serialize_object(obj) for obj in self.objects.all() ] class IntegrationTestFiltering(CommonFilteringTestCase): """ Integration tests for filtered list views. 
""" @unittest.skipUnless(django_filters, 'django-filter not installed') def test_backend_deprecation(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") view = FilterFieldsRootView.as_view() request = factory.get('/') response = view(request).render() assert response.status_code == status.HTTP_200_OK assert response.data == self.data self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) self.assertIn("'rest_framework.filters.DjangoFilterBackend' is deprecated.", str(w[-1].message)) @unittest.skipUnless(django_filters, 'django-filter not installed') def test_no_df_deprecation(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") import django_filters.rest_framework class DFFilterFieldsRootView(FilterFieldsRootView): filter_backends = (django_filters.rest_framework.DjangoFilterBackend,) view = DFFilterFieldsRootView.as_view() request = factory.get('/') response = view(request).render() assert response.status_code == status.HTTP_200_OK assert response.data == self.data assert len(w) == 0 @unittest.skipUnless(django_filters, 'django-filter not installed') def test_backend_mro(self): class CustomBackend(filters.DjangoFilterBackend): def filter_queryset(self, request, queryset, view): assert False, "custom filter_queryset should run" class DFFilterFieldsRootView(FilterFieldsRootView): filter_backends = (CustomBackend,) view = DFFilterFieldsRootView.as_view() request = factory.get('/') with pytest.raises(AssertionError, message="custom filter_queryset should run"): view(request).render() @unittest.skipUnless(django_filters, 'django-filter not installed') def test_get_filtered_fields_root_view(self): """ GET requests to paginated ListCreateAPIView should return paginated results. """ view = FilterFieldsRootView.as_view() # Basic test with no filter. request = factory.get('/') response = view(request).render() assert response.status_code == status.HTTP_200_OK assert response.data == self.data # Tests that the decimal filter works. search_decimal = Decimal('2.25') request = factory.get('/', {'decimal': '%s' % search_decimal}) response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal] assert response.data == expected_data # Tests that the date filter works. search_date = datetime.date(2012, 9, 22) request = factory.get('/', {'date': '%s' % search_date}) # search_date str: '2012-09-22' response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if parse_date(f['date']) == search_date] assert response.data == expected_data @unittest.skipUnless(django_filters, 'django-filter not installed') def test_filter_with_queryset(self): """ Regression test for #814. """ view = FilterFieldsQuerysetView.as_view() # Tests that the decimal filter works. search_decimal = Decimal('2.25') request = factory.get('/', {'decimal': '%s' % search_decimal}) response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal] assert response.data == expected_data @unittest.skipUnless(django_filters, 'django-filter not installed') def test_filter_with_get_queryset_only(self): """ Regression test for #834. 
""" view = GetQuerysetView.as_view() request = factory.get('/get-queryset/') view(request).render() # Used to raise "issubclass() arg 2 must be a class or tuple of classes" # here when neither `model' nor `queryset' was specified. @unittest.skipUnless(django_filters, 'django-filter not installed') def test_get_filtered_class_root_view(self): """ GET requests to filtered ListCreateAPIView that have a filter_class set should return filtered results. """ view = FilterClassRootView.as_view() # Basic test with no filter. request = factory.get('/') response = view(request).render() assert response.status_code == status.HTTP_200_OK assert response.data == self.data # Tests that the decimal filter set with 'lt' in the filter class works. search_decimal = Decimal('4.25') request = factory.get('/', {'decimal': '%s' % search_decimal}) response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if Decimal(f['decimal']) < search_decimal] assert response.data == expected_data # Tests that the date filter set with 'gt' in the filter class works. search_date = datetime.date(2012, 10, 2) request = factory.get('/', {'date': '%s' % search_date}) # search_date str: '2012-10-02' response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if parse_date(f['date']) > search_date] assert response.data == expected_data # Tests that the text filter set with 'icontains' in the filter class works. search_text = 'ff' request = factory.get('/', {'text': '%s' % search_text}) response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if search_text in f['text'].lower()] assert response.data == expected_data # Tests that multiple filters works. search_decimal = Decimal('5.25') search_date = datetime.date(2012, 10, 2) request = factory.get('/', { 'decimal': '%s' % (search_decimal,), 'date': '%s' % (search_date,) }) response = view(request).render() assert response.status_code == status.HTTP_200_OK expected_data = [f for f in self.data if parse_date(f['date']) > search_date and Decimal(f['decimal']) < search_decimal] assert response.data == expected_data @unittest.skipUnless(django_filters, 'django-filter not installed') def test_incorrectly_configured_filter(self): """ An error should be displayed when the filter class is misconfigured. """ view = IncorrectlyConfiguredRootView.as_view() request = factory.get('/') self.assertRaises(AssertionError, view, request) @unittest.skipUnless(django_filters, 'django-filter not installed') def test_base_model_filter(self): """ The `get_filter_class` model checks should allow base model filters. """ view = BaseFilterableItemFilterRootView.as_view() request = factory.get('/?text=aaa') response = view(request).render() assert response.status_code == status.HTTP_200_OK assert len(response.data) == 1 @unittest.skipUnless(django_filters, 'django-filter not installed') def test_base_model_filter_with_proxy(self): """ The `get_filter_class` model checks should allow base model filters. """ view = BaseFilterableItemFilterWithProxyRootView.as_view() request = factory.get('/?text=aaa') response = view(request).render() assert response.status_code == status.HTTP_200_OK assert len(response.data) == 1 @unittest.skipUnless(django_filters, 'django-filter not installed') def test_unknown_filter(self): """ GET requests with filters that aren't configured should return 200. 
""" view = FilterFieldsRootView.as_view() search_integer = 10 request = factory.get('/', {'integer': '%s' % search_integer}) response = view(request).render() assert response.status_code == status.HTTP_200_OK @override_settings(ROOT_URLCONF='tests.test_filters') class IntegrationTestDetailFiltering(CommonFilteringTestCase): """ Integration tests for filtered detail views. """ def _get_url(self, item): return reverse('detail-view', kwargs=dict(pk=item.pk)) @unittest.skipUnless(django_filters, 'django-filter not installed') def test_get_filtered_detail_view(self): """ GET requests to filtered RetrieveAPIView that have a filter_class set should return filtered results. """ item = self.objects.all()[0] data = self._serialize_object(item) # Basic test with no filter. response = self.client.get(self._get_url(item)) assert response.status_code == status.HTTP_200_OK assert response.data == data # Tests that the decimal filter set that should fail. search_decimal = Decimal('4.25') high_item = self.objects.filter(decimal__gt=search_decimal)[0] response = self.client.get( '{url}'.format(url=self._get_url(high_item)), {'decimal': '{param}'.format(param=search_decimal)}) assert response.status_code == status.HTTP_404_NOT_FOUND # Tests that the decimal filter set that should succeed. search_decimal = Decimal('4.25') low_item = self.objects.filter(decimal__lt=search_decimal)[0] low_item_data = self._serialize_object(low_item) response = self.client.get( '{url}'.format(url=self._get_url(low_item)), {'decimal': '{param}'.format(param=search_decimal)}) assert response.status_code == status.HTTP_200_OK assert response.data == low_item_data # Tests that multiple filters works. search_decimal = Decimal('5.25') search_date = datetime.date(2012, 10, 2) valid_item = self.objects.filter(decimal__lt=search_decimal, date__gt=search_date)[0] valid_item_data = self._serialize_object(valid_item) response = self.client.get( '{url}'.format(url=self._get_url(valid_item)), { 'decimal': '{decimal}'.format(decimal=search_decimal), 'date': '{date}'.format(date=search_date) }) assert response.status_code == status.HTTP_200_OK assert response.data == valid_item_data class SearchFilterModel(models.Model): title = models.CharField(max_length=20) text = models.CharField(max_length=100) class SearchFilterSerializer(serializers.ModelSerializer): class Meta: model = SearchFilterModel fields = '__all__' class SearchFilterTests(TestCase): def setUp(self): # Sequence of title/text is: # # z abc # zz bcd # zzz cde # ... 
for idx in range(10): title = 'z' * (idx + 1) text = ( chr(idx + ord('a')) + chr(idx + ord('b')) + chr(idx + ord('c')) ) SearchFilterModel(title=title, text=text).save() def test_search(self): class SearchListView(generics.ListAPIView): queryset = SearchFilterModel.objects.all() serializer_class = SearchFilterSerializer filter_backends = (filters.SearchFilter,) search_fields = ('title', 'text') view = SearchListView.as_view() request = factory.get('/', {'search': 'b'}) response = view(request) assert response.data == [ {'id': 1, 'title': 'z', 'text': 'abc'}, {'id': 2, 'title': 'zz', 'text': 'bcd'} ] def test_search_returns_same_queryset_if_no_search_fields_or_terms_provided(self): class SearchListView(generics.ListAPIView): queryset = SearchFilterModel.objects.all() serializer_class = SearchFilterSerializer filter_backends = (filters.SearchFilter,) view = SearchListView.as_view() request = factory.get('/') response = view(request) expected = SearchFilterSerializer(SearchFilterModel.objects.all(), many=True).data assert response.data == expected def test_exact_search(self): class SearchListView(generics.ListAPIView): queryset = SearchFilterModel.objects.all() serializer_class = SearchFilterSerializer filter_backends = (filters.SearchFilter,) search_fields = ('=title', 'text') view = SearchListView.as_view() request = factory.get('/', {'search': 'zzz'}) response = view(request) assert response.data == [ {'id': 3, 'title': 'zzz', 'text': 'cde'} ] def test_startswith_search(self): class SearchListView(generics.ListAPIView): queryset = SearchFilterModel.objects.all() serializer_class = SearchFilterSerializer filter_backends = (filters.SearchFilter,) search_fields = ('title', '^text') view = SearchListView.as_view() request = factory.get('/', {'search': 'b'}) response = view(request) assert response.data == [ {'id': 2, 'title': 'zz', 'text': 'bcd'} ] def test_regexp_search(self): class SearchListView(generics.ListAPIView): queryset = SearchFilterModel.objects.all() serializer_class = SearchFilterSerializer filter_backends = (filters.SearchFilter,) search_fields = ('$title', '$text') view = SearchListView.as_view() request = factory.get('/', {'search': 'z{2} ^b'}) response = view(request) assert response.data == [ {'id': 2, 'title': 'zz', 'text': 'bcd'} ] def test_search_with_nonstandard_search_param(self): with override_settings(REST_FRAMEWORK={'SEARCH_PARAM': 'query'}): reload_module(filters) class SearchListView(generics.ListAPIView): queryset = SearchFilterModel.objects.all() serializer_class = SearchFilterSerializer filter_backends = (filters.SearchFilter,) search_fields = ('title', 'text') view = SearchListView.as_view() request = factory.get('/', {'query': 'b'}) response = view(request) assert response.data == [ {'id': 1, 'title': 'z', 'text': 'abc'}, {'id': 2, 'title': 'zz', 'text': 'bcd'} ] reload_module(filters) class AttributeModel(models.Model): label = models.CharField(max_length=32) class SearchFilterModelFk(models.Model): title = models.CharField(max_length=20) attribute = models.ForeignKey(AttributeModel, on_delete=models.CASCADE) class SearchFilterFkSerializer(serializers.ModelSerializer): class Meta: model = SearchFilterModelFk fields = '__all__' class SearchFilterFkTests(TestCase): def test_must_call_distinct(self): filter_ = filters.SearchFilter() prefixes = [''] + list(filter_.lookup_prefixes) for prefix in prefixes: assert not filter_.must_call_distinct( SearchFilterModelFk._meta, ["%stitle" % prefix] ) assert not filter_.must_call_distinct( SearchFilterModelFk._meta, 
["%stitle" % prefix, "%sattribute__label" % prefix] ) def test_must_call_distinct_restores_meta_for_each_field(self): # In this test case the attribute of the fk model comes first in the # list of search fields. filter_ = filters.SearchFilter() prefixes = [''] + list(filter_.lookup_prefixes) for prefix in prefixes: assert not filter_.must_call_distinct( SearchFilterModelFk._meta, ["%sattribute__label" % prefix, "%stitle" % prefix] ) class SearchFilterModelM2M(models.Model): title = models.CharField(max_length=20) text = models.CharField(max_length=100) attributes = models.ManyToManyField(AttributeModel) class SearchFilterM2MSerializer(serializers.ModelSerializer): class Meta: model = SearchFilterModelM2M fields = '__all__' class SearchFilterM2MTests(TestCase): def setUp(self): # Sequence of title/text/attributes is: # # z abc [1, 2, 3] # zz bcd [1, 2, 3] # zzz cde [1, 2, 3] # ... for idx in range(3): label = 'w' * (idx + 1) AttributeModel(label=label) for idx in range(10): title = 'z' * (idx + 1) text = ( chr(idx + ord('a')) + chr(idx + ord('b')) + chr(idx + ord('c')) ) SearchFilterModelM2M(title=title, text=text).save() SearchFilterModelM2M.objects.get(title='zz').attributes.add(1, 2, 3) def test_m2m_search(self): class SearchListView(generics.ListAPIView): queryset = SearchFilterModelM2M.objects.all() serializer_class = SearchFilterM2MSerializer filter_backends = (filters.SearchFilter,) search_fields = ('=title', 'text', 'attributes__label') view = SearchListView.as_view() request = factory.get('/', {'search': 'zz'}) response = view(request) assert len(response.data) == 1 def test_must_call_distinct(self): filter_ = filters.SearchFilter() prefixes = [''] + list(filter_.lookup_prefixes) for prefix in prefixes: assert not filter_.must_call_distinct( SearchFilterModelM2M._meta, ["%stitle" % prefix] ) assert filter_.must_call_distinct( SearchFilterModelM2M._meta, ["%stitle" % prefix, "%sattributes__label" % prefix] ) class OrderingFilterModel(models.Model): title = models.CharField(max_length=20, verbose_name='verbose title') text = models.CharField(max_length=100) class OrderingFilterRelatedModel(models.Model): related_object = models.ForeignKey(OrderingFilterModel, related_name="relateds", on_delete=models.CASCADE) class OrderingFilterSerializer(serializers.ModelSerializer): class Meta: model = OrderingFilterModel fields = '__all__' class DjangoFilterOrderingModel(models.Model): date = models.DateField() text = models.CharField(max_length=10) class Meta: ordering = ['-date'] class DjangoFilterOrderingSerializer(serializers.ModelSerializer): class Meta: model = DjangoFilterOrderingModel fields = '__all__' class DjangoFilterOrderingTests(TestCase): def setUp(self): data = [{ 'date': datetime.date(2012, 10, 8), 'text': 'abc' }, { 'date': datetime.date(2013, 10, 8), 'text': 'bcd' }, { 'date': datetime.date(2014, 10, 8), 'text': 'cde' }] for d in data: DjangoFilterOrderingModel.objects.create(**d) @unittest.skipUnless(django_filters, 'django-filter not installed') def test_default_ordering(self): class DjangoFilterOrderingView(generics.ListAPIView): serializer_class = DjangoFilterOrderingSerializer queryset = DjangoFilterOrderingModel.objects.all() filter_backends = (filters.DjangoFilterBackend,) filter_fields = ['text'] ordering = ('-date',) view = DjangoFilterOrderingView.as_view() request = factory.get('/') response = view(request) assert response.data == [ {'id': 3, 'date': '2014-10-08', 'text': 'cde'}, {'id': 2, 'date': '2013-10-08', 'text': 'bcd'}, {'id': 1, 'date': '2012-10-08', 'text': 
'abc'} ] class OrderingFilterTests(TestCase): def setUp(self): # Sequence of title/text is: # # zyx abc # yxw bcd # xwv cde for idx in range(3): title = ( chr(ord('z') - idx) + chr(ord('y') - idx) + chr(ord('x') - idx) ) text = ( chr(idx + ord('a')) + chr(idx + ord('b')) + chr(idx + ord('c')) ) OrderingFilterModel(title=title, text=text).save() def test_ordering(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('/', {'ordering': 'text'}) response = view(request) assert response.data == [ {'id': 1, 'title': 'zyx', 'text': 'abc'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 3, 'title': 'xwv', 'text': 'cde'}, ] def test_reverse_ordering(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('/', {'ordering': '-text'}) response = view(request) assert response.data == [ {'id': 3, 'title': 'xwv', 'text': 'cde'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 1, 'title': 'zyx', 'text': 'abc'}, ] def test_incorrecturl_extrahyphens_ordering(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('/', {'ordering': '--text'}) response = view(request) assert response.data == [ {'id': 3, 'title': 'xwv', 'text': 'cde'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 1, 'title': 'zyx', 'text': 'abc'}, ] def test_incorrectfield_ordering(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('/', {'ordering': 'foobar'}) response = view(request) assert response.data == [ {'id': 3, 'title': 'xwv', 'text': 'cde'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 1, 'title': 'zyx', 'text': 'abc'}, ] def test_default_ordering(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('') response = view(request) assert response.data == [ {'id': 3, 'title': 'xwv', 'text': 'cde'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 1, 'title': 'zyx', 'text': 'abc'}, ] def test_default_ordering_using_string(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = 'title' ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('') response = view(request) assert response.data == [ {'id': 3, 'title': 'xwv', 'text': 'cde'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 1, 'title': 'zyx', 'text': 'abc'}, ] def test_ordering_by_aggregate_field(self): # create some related models to 
aggregate order by num_objs = [2, 5, 3] for obj, num_relateds in zip(OrderingFilterModel.objects.all(), num_objs): for _ in range(num_relateds): new_related = OrderingFilterRelatedModel( related_object=obj ) new_related.save() class OrderingListView(generics.ListAPIView): serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = 'title' ordering_fields = '__all__' queryset = OrderingFilterModel.objects.all().annotate( models.Count("relateds")) view = OrderingListView.as_view() request = factory.get('/', {'ordering': 'relateds__count'}) response = view(request) assert response.data == [ {'id': 1, 'title': 'zyx', 'text': 'abc'}, {'id': 3, 'title': 'xwv', 'text': 'cde'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, ] def test_ordering_with_nonstandard_ordering_param(self): with override_settings(REST_FRAMEWORK={'ORDERING_PARAM': 'order'}): reload_module(filters) class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() serializer_class = OrderingFilterSerializer filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) view = OrderingListView.as_view() request = factory.get('/', {'order': 'text'}) response = view(request) assert response.data == [ {'id': 1, 'title': 'zyx', 'text': 'abc'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 3, 'title': 'xwv', 'text': 'cde'}, ] reload_module(filters) def test_get_template_context(self): class OrderingListView(generics.ListAPIView): ordering_fields = '__all__' serializer_class = OrderingFilterSerializer queryset = OrderingFilterModel.objects.all() filter_backends = (filters.OrderingFilter,) request = factory.get('/', {'ordering': 'title'}, HTTP_ACCEPT='text/html') view = OrderingListView.as_view() response = view(request) self.assertContains(response, 'verbose title') def test_ordering_with_overridden_get_serializer_class(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() filter_backends = (filters.OrderingFilter,) ordering = ('title',) # note: no ordering_fields and serializer_class specified def get_serializer_class(self): return OrderingFilterSerializer view = OrderingListView.as_view() request = factory.get('/', {'ordering': 'text'}) response = view(request) assert response.data == [ {'id': 1, 'title': 'zyx', 'text': 'abc'}, {'id': 2, 'title': 'yxw', 'text': 'bcd'}, {'id': 3, 'title': 'xwv', 'text': 'cde'}, ] def test_ordering_with_improper_configuration(self): class OrderingListView(generics.ListAPIView): queryset = OrderingFilterModel.objects.all() filter_backends = (filters.OrderingFilter,) ordering = ('title',) # note: no ordering_fields and serializer_class # or get_serializer_class specified view = OrderingListView.as_view() request = factory.get('/', {'ordering': 'text'}) with self.assertRaises(ImproperlyConfigured): view(request) class SensitiveOrderingFilterModel(models.Model): username = models.CharField(max_length=20) password = models.CharField(max_length=100) # Three different styles of serializer. # All should allow ordering by username, but not by password. 
class SensitiveDataSerializer1(serializers.ModelSerializer): username = serializers.CharField() class Meta: model = SensitiveOrderingFilterModel fields = ('id', 'username') class SensitiveDataSerializer2(serializers.ModelSerializer): username = serializers.CharField() password = serializers.CharField(write_only=True) class Meta: model = SensitiveOrderingFilterModel fields = ('id', 'username', 'password') class SensitiveDataSerializer3(serializers.ModelSerializer): user = serializers.CharField(source='username') class Meta: model = SensitiveOrderingFilterModel fields = ('id', 'user') class SensitiveOrderingFilterTests(TestCase): def setUp(self): for idx in range(3): username = {0: 'userA', 1: 'userB', 2: 'userC'}[idx] password = {0: 'passA', 1: 'passC', 2: 'passB'}[idx] SensitiveOrderingFilterModel(username=username, password=password).save() def test_order_by_serializer_fields(self): for serializer_cls in [ SensitiveDataSerializer1, SensitiveDataSerializer2, SensitiveDataSerializer3 ]: class OrderingListView(generics.ListAPIView): queryset = SensitiveOrderingFilterModel.objects.all().order_by('username') filter_backends = (filters.OrderingFilter,) serializer_class = serializer_cls view = OrderingListView.as_view() request = factory.get('/', {'ordering': '-username'}) response = view(request) if serializer_cls == SensitiveDataSerializer3: username_field = 'user' else: username_field = 'username' # Note: Inverse username ordering correctly applied. assert response.data == [ {'id': 3, username_field: 'userC'}, {'id': 2, username_field: 'userB'}, {'id': 1, username_field: 'userA'}, ] def test_cannot_order_by_non_serializer_fields(self): for serializer_cls in [ SensitiveDataSerializer1, SensitiveDataSerializer2, SensitiveDataSerializer3 ]: class OrderingListView(generics.ListAPIView): queryset = SensitiveOrderingFilterModel.objects.all().order_by('username') filter_backends = (filters.OrderingFilter,) serializer_class = serializer_cls view = OrderingListView.as_view() request = factory.get('/', {'ordering': 'password'}) response = view(request) if serializer_cls == SensitiveDataSerializer3: username_field = 'user' else: username_field = 'username' # Note: The passwords are not in order. Default ordering is used. assert response.data == [ {'id': 1, username_field: 'userA'}, # PassB {'id': 2, username_field: 'userB'}, # PassC {'id': 3, username_field: 'userC'}, # PassA ]
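
# --- Illustrative sketch (added for clarity; not part of the original test
# module). A minimal view combining the SearchFilter and OrderingFilter
# backends exercised above, reusing the models and serializers defined in this
# module; the particular combination is an assumption for illustration only.
class CombinedFilterListView(generics.ListAPIView):
    queryset = SearchFilterModel.objects.all()
    serializer_class = SearchFilterSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
    # '=' means exact match, '^' means startswith, '$' means regex search.
    search_fields = ('=title', 'text')
    ordering_fields = ('title', 'text')
    ordering = ('title',)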
#!/usr/bin/env python # # Copyright 2010,2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, uhd from gnuradio import eng_notation from gnuradio.eng_option import eng_option from optparse import OptionParser import sys def add_freq_option(parser): """ Hackery that has the -f / --freq option set both tx_freq and rx_freq """ def freq_callback(option, opt_str, value, parser): parser.values.rx_freq = value parser.values.tx_freq = value parser.values.freq = value if not parser.has_option('--freq'): parser.add_option('-f', '--freq', type="eng_float", action="callback", callback=freq_callback, help="set Tx and/or Rx frequency to FREQ [default=%default]", metavar="FREQ") class uhd_interface: def __init__(self, istx, args, bandwidth, freq=None, gain=None, spec=None, antenna=None, external=False): if(istx): self.u = uhd.usrp_sink(device_addr=args, stream_args=uhd.stream_args('fc32')) else: self.u = uhd.usrp_source(device_addr=args, stream_args=uhd.stream_args('fc32')) # Set the subdevice spec if(spec): self.u.set_subdev_spec(spec, 0) # Set the antenna if(antenna): self.u.set_antenna(antenna, 0) # Set ref clock if(external): self.u.set_clock_source("external", 0) self.u.set_time_source("external", 0) self._args = args self._ant = antenna self._spec = spec self._external = external self._gain = self.set_gain(gain) self._freq = self.set_freq(freq) self._rate = self.set_sample_rate(bandwidth) def set_sample_rate(self, bandwidth): self.u.set_samp_rate(bandwidth) actual_bw = self.u.get_samp_rate() return actual_bw def get_sample_rate(self): return self.u.get_samp_rate() def set_gain(self, gain=None): if gain is None: # if no gain was specified, use the mid-point in dB g = self.u.get_gain_range() gain = float(g.start()+g.stop())/2 print "\nNo gain specified." 
print "Setting gain to %f (from [%f, %f])" % \ (gain, g.start(), g.stop()) self.u.set_gain(gain, 0) return gain def set_freq(self, freq=None): if(freq is None): sys.stderr.write("You must specify -f FREQ or --freq FREQ\n") sys.exit(1) r = self.u.set_center_freq(freq, 0) if r: return freq else: frange = self.u.get_freq_range() sys.stderr.write(("\nRequested frequency (%f) out or range [%f, %f]\n") % \ (freq, frange.start(), frange.stop())) sys.exit(1) #-------------------------------------------------------------------# # TRANSMITTER #-------------------------------------------------------------------# class uhd_transmitter(uhd_interface, gr.hier_block2): def __init__(self, args, bandwidth, freq=None, gain=None, spec=None, antenna=None, external=False, verbose=False): gr.hier_block2.__init__(self, "uhd_transmitter", gr.io_signature(1,1,gr.sizeof_gr_complex), gr.io_signature(0,0,0)) # Set up the UHD interface as a transmitter uhd_interface.__init__(self, True, args, bandwidth, freq, gain, spec, antenna, external) self.connect(self, self.u) if(verbose): self._print_verbage() def add_options(parser): add_freq_option(parser) parser.add_option("-t", "--tx-args", type="string", default="", help="UHD device address args [default=%default]") parser.add_option("", "--spec", type="string", default=None, help="Subdevice of UHD device where appropriate") parser.add_option("-A", "--antenna", type="string", default=None, help="select Rx Antenna where appropriate") parser.add_option("", "--tx-freq", type="eng_float", default=None, help="set transmit frequency to FREQ [default=%default]", metavar="FREQ") parser.add_option("", "--tx-gain", type="eng_float", default=None, help="set transmit gain in dB (default is midpoint)") parser.add_option("","--external", action="store_true", default=False, help="enable external clock") if not parser.has_option("--verbose"): parser.add_option("-v", "--verbose", action="store_true", default=False) # Make a static method to call before instantiation add_options = staticmethod(add_options) def _print_verbage(self): """ Prints information about the UHD transmitter """ print "\nUHD Transmitter:" print "UHD Args: %s" % (self._args) print "Freq: %sHz" % (eng_notation.num_to_str(self._freq)) print "Gain: %f dB" % (self._gain) print "Sample Rate: %ssps" % (eng_notation.num_to_str(self._rate)) print "Antenna: %s" % (self._ant) print "Subdev Sec: %s" % (self._spec) if self._external: print "\n Using External Clock and PPS \n" # New Function: return usrp time def get_usrp_time(self): time = self.u.get_time_now() return time.get_real_secs(), time.get_frac_secs() #-------------------------------------------------------------------# # RECEIVER #-------------------------------------------------------------------# class uhd_receiver(uhd_interface, gr.hier_block2): def __init__(self, args, bandwidth, freq=None, gain=None, spec=None, antenna=None, external=False, verbose=False): gr.hier_block2.__init__(self, "uhd_receiver", gr.io_signature(0,0,0), gr.io_signature(1,1,gr.sizeof_gr_complex)) # Set up the UHD interface as a receiver uhd_interface.__init__(self, False, args, bandwidth, freq, gain, spec, antenna, external) self.connect(self.u, self) if(verbose): self._print_verbage() def add_options(parser): add_freq_option(parser) parser.add_option("-r", "--rx-args", type="string", default="", help="UHD device address args [default=%default]") parser.add_option("", "--spec", type="string", default=None, help="Subdevice of UHD device where appropriate") parser.add_option("-A", "--antenna", 
type="string", default=None, help="select Rx Antenna where appropriate") parser.add_option("", "--rx-freq", type="eng_float", default=None, help="set receive frequency to FREQ [default=%default]", metavar="FREQ") parser.add_option("", "--rx-gain", type="eng_float", default=None, help="set receive gain in dB (default is midpoint)") parser.add_option("","--external", action="store_true", default=False, help="enable external clock") if not parser.has_option("--verbose"): parser.add_option("-v", "--verbose", action="store_true", default=False) # Make a static method to call before instantiation add_options = staticmethod(add_options) def _print_verbage(self): """ Prints information about the UHD transmitter """ print "\nUHD Receiver:" print "UHD Args: %s" % (self._args) print "Freq: %sHz" % (eng_notation.num_to_str(self._freq)) print "Gain: %f dB" % (self._gain) print "Sample Rate: %ssps" % (eng_notation.num_to_str(self._rate)) print "Antenna: %s" % (self._ant) print "Subdev Sec: %s" % (self._spec) if self._external: print "\n Using External Clock and PPS \n" # New Function: return usrp time def get_usrp_time(self): time = self.u.get_time_now() return time.get_real_secs(), time.get_frac_secs()
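
# --- Illustrative usage sketch (added for clarity; not part of the original
# module). A minimal flowgraph connecting uhd_receiver to a null sink, using
# the same GNU Radio 3.6-era API as the rest of this file. The device args,
# bandwidth and frequency are assumptions for demonstration; real scripts take
# them from the OptionParser options defined above.
def example_receive_flowgraph():
    tb = gr.top_block()
    rx = uhd_receiver(args="", bandwidth=1e6, freq=2.45e9,
                      gain=None, spec=None, antenna=None,
                      external=False, verbose=True)
    tb.connect(rx, gr.null_sink(gr.sizeof_gr_complex))
    return tb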
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dmpayton
# Copyright (c) 2014 dequis
# Copyright (c) 2014 Tycho Andersen
# Copyright (c) 2015 Serge Hallyn
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import division

from .base import Layout

# We have an array of columns. Each column is a dict containing
# width (in percent), rows (an array of rows), and mode, which is
# either 'stack' or 'split'
#
# Each row is an array of clients


class Wmii(Layout):
    """This layout emulates wmii layouts

    The screen is split into columns, always starting with one. A new
    window is created in the active window's column. Windows can be
    shifted left and right. If there is no column when shifting, a new
    one is created. Each column can be stacked or divided (equally
    split).

    This layout implements something akin to wmii's semantics.

    Each group starts with one column. The first window takes up the
    whole screen. The next window splits the column in half. Windows
    can be moved to the column to the left or right. If there is no
    column in the direction being moved into, a new column is created.

    Each column can be either stacked (each window takes up the whole
    vertical real estate) or split (the windows are split equally
    vertically in the column). Columns can be grown horizontally
    (cmd_grow_left/right).

    My config.py has the following added::

        Key(
            [mod, "shift", "control"], "l",
            lazy.layout.grow_right()
        ),
        Key(
            [mod, "shift"], "l",
            lazy.layout.shuffle_right()
        ),
        Key(
            [mod, "shift", "control"], "h",
            lazy.layout.grow_left()
        ),
        Key(
            [mod, "shift"], "h",
            lazy.layout.shuffle_left()
        ),
        Key(
            [mod], "s",
            lazy.layout.toggle_split()
        ),
    """
    defaults = [
        ("border_focus", "#881111", "Border colour for the focused window."),
        ("border_normal", "#220000", "Border colour for un-focused windows."),
        ("border_focus_stack", "#0000ff",
         "Border colour for the focused window in a stacked column."),
        ("border_normal_stack", "#000022",
         "Border colour for un-focused windows in a stacked column."),
        ("grow_amount", 5, "Amount by which to grow/shrink a window."),
        ("border_width", 2, "Border width."),
        ("name", "wmii", "Name of this layout."),
        ("margin", 0, "Margin of the layout."),
    ]

    def __init__(self, **config):
        Layout.__init__(self, **config)
        self.add_defaults(Wmii.defaults)
        self.current_window = None
        self.clients = []
        self.columns = [{'active': 0, 'width': 100, 'mode': 'split',
                         'rows': []}]

    def info(self):
        d = Layout.info(self)
        d["current_window"] = self.current_window
        d["clients"] = [x.name for x in self.clients]
        return d

    def add_column(self, prepend, win):
        newwidth = int(100 / (len(self.columns) + 1))
        # we are only called if there already is a column, simplifies things
        for c in self.columns:
            c['width'] = newwidth
        c = {'width': newwidth, 'mode': 'split', 'rows': [win]}
        if prepend:
            self.columns.insert(0, c)
        else:
            self.columns.append(c)

    def clone(self, group):
        c = Layout.clone(self, group)
        c.current_window = None
        c.clients = []
        c.columns = [{'active': 0, 'width': 100, 'mode': 'split', 'rows': []}]
        return c

    def current_column(self):
        if self.current_window is None:
            return None
        for c in self.columns:
            if self.current_window in c['rows']:
                return c
        return None

    def add(self, client):
        self.clients.append(client)
        c = self.current_column()
        if c is None:
            if len(self.columns) == 0:
                self.columns = [{'active': 0, 'width': 100, 'mode': 'split',
                                 'rows': []}]
            c = self.columns[0]
        c['rows'].append(client)
        self.focus(client)

    def remove(self, client):
        if client not in self.clients:
            return
        self.clients.remove(client)
        for c in self.columns:
            if client in c['rows']:
                ridx = c['rows'].index(client)
                cidx = self.columns.index(c)
                c['rows'].remove(client)
                if len(c['rows']) != 0:
                    if client == self.current_window:
                        if ridx > 0:
                            ridx -= 1
                        newclient = c['rows'][ridx]
                        self.focus(newclient)
                    self.group.focus(self.current_window)
                    return self.current_window
                # column is now empty, remove it and select the previous one
                self.columns.remove(c)
                if len(self.columns) == 0:
                    return None
                newwidth = int(100 / len(self.columns))
                for c in self.columns:
                    c['width'] = newwidth
                if len(self.columns) == 1:
                    # there is no window at all
                    return None
                if cidx > 0:
                    cidx -= 1
                c = self.columns[cidx]
                rows = c['rows']
                newclient = rows[0]
                self.focus(newclient)
                self.group.focus(newclient)
                return newclient

    def is_last_column(self, cidx):
        return cidx == len(self.columns) - 1

    def focus(self, client):
        self.current_window = client
        for c in self.columns:
            if client in c['rows']:
                c['active'] = c['rows'].index(client)

    def configure(self, client, screen):
        show = True
        if client not in self.clients:
            return
        ridx = -1
        xoffset = int(screen.x)
        for c in self.columns:
            if client in c['rows']:
                ridx = c['rows'].index(client)
                break
            xoffset += int(float(c['width']) * screen.width / 100.0)
        if ridx == -1:
            return
        if client == self.current_window:
            if c['mode'] == 'split':
                px = self.group.qtile.colorPixel(self.border_focus)
            else:
                px = self.group.qtile.colorPixel(self.border_focus_stack)
        else:
            if c['mode'] == 'split':
                px = self.group.qtile.colorPixel(self.border_normal)
            else:
                px = self.group.qtile.colorPixel(self.border_normal_stack)
        if c['mode'] == 'split':
            oneheight = screen.height / len(c['rows'])
            yoffset = int(screen.y + oneheight * ridx)
            win_height = int(oneheight - 2 * self.border_width)
        else:  # stacked
            if c['active'] != c['rows'].index(client):
                show = False
            yoffset = int(screen.y)
            win_height = int(screen.height - 2 * self.border_width)
        win_width = int(float(c['width'] * screen.width / 100.0))
        win_width -= 2 * self.border_width
        if show:
            client.place(
                xoffset,
                yoffset,
                win_width,
                win_height,
                self.border_width,
                px,
                margin=self.margin,
            )
            client.unhide()
        else:
            client.hide()

    def cmd_toggle_split(self):
        c = self.current_column()
        if c is None:
            return
        if c['mode'] == "split":
            c['mode'] = "stack"
        else:
            c['mode'] = "split"
        self.group.layoutAll()

    def focus_next(self, win):
        self.cmd_down()
        return self.current_window

    def focus_previous(self, win):
        self.cmd_up()
        return self.current_window

    def focus_first(self):
        if len(self.columns) == 0:
            self.columns = [{'active': 0, 'width': 100, 'mode': 'split',
                             'rows': []}]
        c = self.columns[0]
        if len(c['rows']) != 0:
            return c['rows'][0]

    def focus_last(self):
        c = self.columns[len(self.columns) - 1]
        if len(c['rows']) != 0:
            return c['rows'][len(c['rows']) - 1]

    def cmd_left(self):
        """Switch to the first window of the previous column"""
        c = self.current_column()
        if c is None:
            return
        cidx = self.columns.index(c)
        if cidx == 0:
            return
        cidx -= 1
        c = self.columns[cidx]
        if c['mode'] == "split":
            self.group.focus(c['rows'][0])
        else:
            self.group.focus(c['rows'][c['active']])

    def cmd_right(self):
        """Switch to the first window of the next column"""
        c = self.current_column()
        if c is None:
            return
        cidx = self.columns.index(c)
        if self.is_last_column(cidx):
            return
        cidx += 1
        c = self.columns[cidx]
        if c['mode'] == "split":
            self.group.focus(c['rows'][0])
        else:
            self.group.focus(c['rows'][c['active']])

    def cmd_up(self):
        """Switch to the previous window in the current column"""
        c = self.current_column()
        if c is None:
            return
        ridx = c['rows'].index(self.current_window)
        if ridx == 0:
            if c['mode'] != "split":
                ridx = len(c['rows']) - 1
        else:
            ridx -= 1
        client = c['rows'][ridx]
        self.group.focus(client)

    def cmd_down(self):
        """Switch to the next window in the current column"""
        c = self.current_column()
        if c is None:
            return
        ridx = c['rows'].index(self.current_window)
        if ridx == len(c['rows']) - 1:
            if c['mode'] != "split":
                ridx = 0
        else:
            ridx += 1
        client = c['rows'][ridx]
        self.group.focus(client)

    cmd_next = cmd_down
    cmd_previous = cmd_up

    def cmd_shuffle_left(self):
        cur = self.current_window
        if cur is None:
            return
        for c in self.columns:
            if cur in c['rows']:
                cidx = self.columns.index(c)
                if cidx == 0:
                    if len(c['rows']) == 1:
                        return
                    c['rows'].remove(cur)
                    self.add_column(True, cur)
                    if len(c['rows']) == 0:
                        self.columns.remove(c)
                else:
                    c['rows'].remove(cur)
                    self.columns[cidx - 1]['rows'].append(cur)
                    if len(c['rows']) == 0:
                        self.columns.remove(c)
                        newwidth = int(100 / len(self.columns))
                        for c in self.columns:
                            c['width'] = newwidth
                    else:
                        if c['active'] >= len(c['rows']):
                            c['active'] = len(c['rows']) - 1
                self.group.focus(cur)
                return

    def swap_column_width(self, grow, shrink):
        grower = self.columns[grow]
        shrinker = self.columns[shrink]
        amount = self.grow_amount
        if shrinker['width'] - amount < 5:
            return
        grower['width'] += amount
        shrinker['width'] -= amount

    def cmd_grow_left(self):
        cur = self.current_window
        if cur is None:
            return
        for c in self.columns:
            if cur in c['rows']:
                cidx = self.columns.index(c)
                if cidx == 0:
                    # the leftmost column cannot grow to the left, so
                    # shrink it (grow its right neighbour) instead
                    if self.is_last_column(cidx):
                        return
                    self.swap_column_width(cidx + 1, cidx)
                    self.group.focus(cur)
                    return
                self.swap_column_width(cidx, cidx - 1)
                self.group.focus(cur)
                return

    def cmd_grow_right(self):
        cur = self.current_window
        if cur is None:
            return
        for c in self.columns:
            if cur in c['rows']:
                cidx = self.columns.index(c)
                if self.is_last_column(cidx):
                    # the rightmost column cannot grow to the right, so
                    # shrink it (grow its left neighbour) instead
                    if cidx == 0:
                        return
                    self.swap_column_width(cidx - 1, cidx)
                    self.group.focus(cur)
                    return
                # grow my width by grow_amount, shrinking the neighbour
                # to the right by the same amount
                self.swap_column_width(cidx, cidx + 1)
                self.group.focus(cur)
                return

    def cmd_shuffle_right(self):
        cur = self.current_window
        if cur is None:
            return
        for c in self.columns:
            if cur in c['rows']:
                cidx = self.columns.index(c)
                if self.is_last_column(cidx):
                    if len(c['rows']) == 1:
                        return
                    c['rows'].remove(cur)
                    self.add_column(False, cur)
                    if len(c['rows']) == 0:
                        self.columns.remove(c)
                else:
                    c['rows'].remove(cur)
                    self.columns[cidx + 1]['rows'].append(cur)
                    if len(c['rows']) == 0:
                        self.columns.remove(c)
                        newwidth = int(100 / len(self.columns))
                        for c in self.columns:
                            c['width'] = newwidth
                    else:
                        if c['active'] >= len(c['rows']):
                            c['active'] = len(c['rows']) - 1
                self.group.focus(cur)
                return

    def cmd_shuffle_down(self):
        for c in self.columns:
            if self.current_window in c['rows']:
                r = c['rows']
                ridx = r.index(self.current_window)
                if ridx + 1 < len(r):
                    r[ridx], r[ridx + 1] = r[ridx + 1], r[ridx]
                    client = r[ridx + 1]
                    self.focus(client)
                    self.group.focus(client)
                return

    def cmd_shuffle_up(self):
        for c in self.columns:
            if self.current_window in c['rows']:
                r = c['rows']
                ridx = r.index(self.current_window)
                if ridx > 0:
                    r[ridx - 1], r[ridx] = r[ridx], r[ridx - 1]
                    client = r[ridx - 1]
                    self.focus(client)
                    self.group.focus(client)
                return
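# A minimal sketch (hypothetical helper, not part of the layout above) of the
# column arithmetic used throughout Wmii: whenever a column is added or
# removed, every column is reset to int(100 / ncolumns) percent, so widths
# stay integral and up to ncolumns - 1 percent is lost to rounding.

def _rebalanced_widths(ncolumns):
    # mirrors: newwidth = int(100 / len(self.columns))
    width = int(100 // ncolumns)
    return [width] * ncolumns

assert _rebalanced_widths(1) == [100]
assert _rebalanced_widths(2) == [50, 50]
assert _rebalanced_widths(3) == [33, 33, 33]  # 1% lost to integer rounding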
import sys
from functools import wraps

# Code by Gary Bernhardt, taken from https://github.com/garybernhardt/dingus
#
# We needed this code to reliably load initial data from fixtures in South.
# See http://stackoverflow.com/questions/5472925/django-loading-data-from-fixture-after-backward-migration-loaddata-is-using-mo/5906258#5906258


def DingusTestCase(object_under_test, exclude=None):
    if isinstance(exclude, basestring):
        raise ValueError("Strings not allowed for exclude. " +
                         "Use a list: exclude=['identifier']")
    exclude = [] if exclude is None else exclude

    def get_names_under_test():
        module = sys.modules[object_under_test.__module__]
        for name, value in module.__dict__.iteritems():
            if value is object_under_test or name in exclude:
                yield name

    class TestCase(object):
        def setup(self):
            module_name = object_under_test.__module__
            self._dingus_module = sys.modules[module_name]
            self._dingus_replace_module_globals(self._dingus_module)

        def teardown(self):
            self._dingus_restore_module(self._dingus_module)

        def _dingus_replace_module_globals(self, module):
            old_module_dict = module.__dict__.copy()
            module_keys = set(module.__dict__.iterkeys())
            dunders = set(k for k in module_keys
                          if k.startswith('__') and k.endswith('__'))
            replaced_keys = (module_keys - dunders - set(names_under_test))
            for key in replaced_keys:
                module.__dict__[key] = Dingus()
            module.__dict__['__dingused_dict__'] = old_module_dict

        def _dingus_restore_module(self, module):
            old_module_dict = module.__dict__['__dingused_dict__']
            module.__dict__.clear()
            module.__dict__.update(old_module_dict)

    names_under_test = list(get_names_under_test())
    TestCase.__name__ = '%s_DingusTestCase' % '_'.join(names_under_test)
    return TestCase


# These sentinels are used for argument defaults because the user might want
# to pass in None, which is different in some cases than passing nothing.

class NoReturnValue(object):
    pass


class NoArgument(object):
    pass


def patch(object_path, new_object=NoArgument):
    module_name, attribute_name = object_path.rsplit('.', 1)
    return _Patcher(module_name, attribute_name, new_object)


class _Patcher:
    def __init__(self, module_name, attribute_name, new_object):
        self.module_name = module_name
        self.attribute_name = attribute_name
        self.module = _importer(self.module_name)
        if new_object is NoArgument:
            full_name = '%s.%s' % (module_name, attribute_name)
            self.new_object = Dingus(full_name)
        else:
            self.new_object = new_object

    def __call__(self, fn):
        @wraps(fn)
        def new_fn(*args, **kwargs):
            self.patch_object()
            try:
                return fn(*args, **kwargs)
            finally:
                self.restore_object()
        return new_fn

    def __enter__(self):
        self.patch_object()

    def __exit__(self, exc_type, exc_value, traceback):
        self.restore_object()

    def patch_object(self):
        self.original_object = getattr(self.module, self.attribute_name)
        setattr(self.module, self.attribute_name, self.new_object)

    def restore_object(self):
        setattr(self.module, self.attribute_name, self.original_object)


def isolate(object_path):
    def decorator(fn):
        module_name, object_name = object_path.rsplit('.', 1)
        module = sys.modules[module_name]
        neighbors = set(dir(module)) - set([object_name])
        for neighbor in neighbors:
            neighbor_path = '%s.%s' % (module_name, neighbor)
            fn = patch(neighbor_path)(fn)
        return fn
    return decorator


def _importer(target):
    components = target.split('.')
    import_path = components.pop(0)
    thing = __import__(import_path)
    for comp in components:
        import_path += ".%s" % comp
        thing = _dot_lookup(thing, comp, import_path)
    return thing


def _dot_lookup(thing, comp, import_path):
    try:
        return getattr(thing, comp)
    except AttributeError:
        __import__(import_path)
        return getattr(thing, comp)


class DontCare(object):
    pass


class Call(tuple):
    def __new__(cls, name, args, kwargs, return_value):
        return tuple.__new__(cls, (name, args, kwargs, return_value))

    def __init__(self, *args):
        self.name = self[0]
        self.args = self[1]
        self.kwargs = self[2]
        self.return_value = self[3]

    def __getnewargs__(self):
        return (self.name, self.args, self.kwargs, self.return_value)


class CallList(list):
    @staticmethod
    def _match_args(call, args):
        if not args:
            return True
        elif len(args) != len(call.args):
            return False
        else:
            return all(args[i] in (DontCare, call.args[i])
                       for i in range(len(call.args)))

    @staticmethod
    def _match_kwargs(call, kwargs):
        if not kwargs:
            return True
        elif len(kwargs) != len(call.kwargs):
            return False
        else:
            return all(name in kwargs and kwargs[name] in (DontCare, val)
                       for name, val in call.kwargs.iteritems())

    def one(self):
        if len(self) == 1:
            return self[0]
        else:
            return None

    def once(self):
        return self.one()

    def __call__(self, __name=NoArgument, *args, **kwargs):
        return CallList([call for call in self
                         if (__name is NoArgument or __name == call.name)
                         and self._match_args(call, args)
                         and self._match_kwargs(call, kwargs)])


def returner(return_value):
    return Dingus(return_value=return_value)


class Dingus(object):
    @property
    def __enter__(self):
        return self._existing_or_new_child('__enter__')

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        if exc_type and exc_type not in self.consumed_context_manager_exceptions:
            return False
        else:
            return True

    def __init__(self, dingus_name=None, full_name=None,
                 consumed_context_manager_exceptions=None, **kwargs):
        self._parent = None
        self.reset()
        name = 'dingus_%i' % id(self) if dingus_name is None else dingus_name
        full_name = name if full_name is None else full_name
        self._short_name = name
        self._full_name = full_name
        self.__name__ = name
        self.consumed_context_manager_exceptions = (
            consumed_context_manager_exceptions or [])

        for attr_name, attr_value in kwargs.iteritems():
            if attr_name.endswith('__returns'):
                attr_name = attr_name.replace('__returns', '')
                returner = self._create_child(attr_name)
                returner.return_value = attr_value
                setattr(self, attr_name, returner)
            else:
                setattr(self, attr_name, attr_value)

        self._replace_init_method()

    @classmethod
    def many(cls, count):
        return tuple(cls() for _ in range(count))

    def _fake_init(self, *args, **kwargs):
        return self.__getattr__('__init__')(*args, **kwargs)

    def _replace_init_method(self):
        self.__init__ = self._fake_init

    def _create_child(self, name):
        separator = ('' if (name.startswith('()') or name.startswith('['))
                     else '.')
        full_name = self._full_name + separator + name
        child = self.__class__(name, full_name)
        child._parent = self
        return child

    def reset(self):
        self._return_value = NoReturnValue
        self.calls = CallList()
        self._children = {}

    def assert_call(self, *args, **kwargs):
        expected_call = self.calls('()', *args, **kwargs)
        if expected_call:
            return
        recorded_calls = self.calls
        calls_description = "No calls recorded" if not recorded_calls \
            else "Recorded calls: %s" % recorded_calls
        message = "Expected a call to: '%s', " % self + \
                  "args: %s, kwargs: %s, " % (args, kwargs) + \
                  "\n" + calls_description
        raise AssertionError(message)

    def _get_return_value(self):
        if self._return_value is NoReturnValue:
            self._return_value = self._create_child('()')
        return self._return_value

    def _set_return_value(self, value):
        self._return_value = value

    return_value = property(_get_return_value, _set_return_value)

    def __call__(self, *args, **kwargs):
        self._log_call('()', args, kwargs, self.return_value)
        if self._parent:
            self._parent._log_call(self._short_name, args, kwargs,
                                   self.return_value)
        return self.return_value

    def _log_call(self, name, args, kwargs, return_value):
        self.calls.append(Call(name, args, kwargs, return_value))

    def _should_ignore_attribute(self, name):
        return name in ['__pyobjc_object__', '__getnewargs__']

    def __getstate__(self):
        # Python cannot pickle an instancemethod
        # http://bugs.python.org/issue558238
        return [(attr, value) for attr, value in self.__dict__.items()
                if attr != "__init__"]

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._replace_init_method()

    def _existing_or_new_child(self, child_name, default_value=NoArgument):
        if child_name not in self._children:
            value = (self._create_child(child_name)
                     if default_value is NoArgument
                     else default_value)
            self._children[child_name] = value
        return self._children[child_name]

    def _remove_child_if_exists(self, child_name):
        if child_name in self._children:
            del self._children[child_name]

    def __getattr__(self, name):
        if self._should_ignore_attribute(name):
            raise AttributeError(name)
        return self._existing_or_new_child(name)

    def __delattr__(self, name):
        self._log_call('__delattr__', (name,), {}, None)

    def __getitem__(self, index):
        child_name = '[%s]' % (index,)
        return_value = self._existing_or_new_child(child_name)
        self._log_call('__getitem__', (index,), {}, return_value)
        return return_value

    def __setitem__(self, index, value):
        child_name = '[%s]' % (index,)
        self._log_call('__setitem__', (index, value), {}, None)
        self._remove_child_if_exists(child_name)
        self._existing_or_new_child(child_name, value)

    def _create_infix_operator(name):
        def operator_fn(self, other):
            return_value = self._existing_or_new_child(name)
            self._log_call(name, (other,), {}, return_value)
            return return_value
        operator_fn.__name__ = name
        return operator_fn

    _BASE_OPERATOR_NAMES = ['add', 'and', 'div', 'lshift', 'mod', 'mul',
                            'or', 'pow', 'rshift', 'sub', 'xor']

    def _infix_operator_names(base_operator_names):
        # This function has to have base_operator_names passed in because
        # Python's scoping rules prevent it from seeing the class-level
        # _BASE_OPERATOR_NAMES.
        reverse_operator_names = ['r%s' % name for name in base_operator_names]
        for operator_name in base_operator_names + reverse_operator_names:
            operator_fn_name = '__%s__' % operator_name
            yield operator_fn_name

    # Define each infix operator
    for operator_fn_name in _infix_operator_names(_BASE_OPERATOR_NAMES):
        exec('%s = _create_infix_operator("%s")' % (operator_fn_name,
                                                    operator_fn_name))

    def _augmented_operator_names(base_operator_names):
        # Augmented operators are things like +=. They behave differently
        # than normal infix operators because they return self instead of a
        # new object.
        return ['__i%s__' % operator_name
                for operator_name in base_operator_names]

    def _create_augmented_operator(name):
        def operator_fn(self, other):
            return_value = self
            self._log_call(name, (other,), {}, return_value)
            return return_value
        operator_fn.__name__ = name
        return operator_fn

    # Define each augmented operator
    for operator_fn_name in _augmented_operator_names(_BASE_OPERATOR_NAMES):
        exec('%s = _create_augmented_operator("%s")' % (operator_fn_name,
                                                        operator_fn_name))

    def __str__(self):
        return '<Dingus %s>' % self._full_name

    __repr__ = __str__

    def __len__(self):
        return 1

    def __iter__(self):
        return iter([self._existing_or_new_child('__iter__')])

    # We don't want to define __deepcopy__ at all. If there isn't one,
    # deepcopy will clone the whole object, which is what we want.
    __deepcopy__ = None


def exception_raiser(exception):
    def raise_exception(*args, **kwargs):
        raise exception
    return raise_exception


def loaddata(orm, fixture_name):
    _get_model = lambda model_identifier: orm[model_identifier]
    with patch('django.core.serializers.python._get_model', _get_model):
        from django.core.management import call_command
        call_command("loaddata", fixture_name)
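# A short usage sketch of the helpers above (the 'db'/'save' names are made
# up for illustration): every attribute access on a Dingus creates a
# recording child, and CallList supports filtering by name, args and kwargs.

d = Dingus('db')
d.save('row', fast=True)
assert d.calls('save', 'row', fast=True).one() is not None
assert d.save.calls('()').one().args == ('row',)

# patch() doubles as a decorator and a context manager; here os.getcwd is
# temporarily swapped for a Dingus and restored on exit.
import os
with patch('os.getcwd'):
    os.getcwd()  # recorded on the Dingus instead of touching the OS
assert not isinstance(os.getcwd, Dingus)  # the original was restored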
# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree CONNECTION_INFO = { 'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd' } NODE_NAME = 'fake_node' VSERVER_NAME = 'fake_vserver' VSERVER_NAME_2 = 'fake_vserver_2' ADMIN_VSERVER_NAME = 'fake_admin_vserver' NODE_VSERVER_NAME = 'fake_node_vserver' ROOT_VOLUME_AGGREGATE_NAME = 'fake_root_aggr' ROOT_VOLUME_NAME = 'fake_root_volume' SHARE_AGGREGATE_NAME = 'fake_aggr1' SHARE_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2') SHARE_AGGREGATE_RAID_TYPES = ('raid4', 'raid_dp') SHARE_AGGREGATE_DISK_TYPE = 'FCAL' SHARE_NAME = 'fake_share' SHARE_SIZE = '1000000000' SHARE_NAME_2 = 'fake_share_2' SNAPSHOT_NAME = 'fake_snapshot' CG_SNAPSHOT_ID = 'fake_cg_id' PARENT_SHARE_NAME = 'fake_parent_share' PARENT_SNAPSHOT_NAME = 'fake_parent_snapshot' MAX_FILES = 5000 LANGUAGE = 'fake_language' SNAPSHOT_POLICY_NAME = 'fake_snapshot_policy' EXPORT_POLICY_NAME = 'fake_export_policy' DELETED_EXPORT_POLICIES = { VSERVER_NAME: [ 'deleted_manila_fake_policy_1', 'deleted_manila_fake_policy_2', ], VSERVER_NAME_2: [ 'deleted_manila_fake_policy_3', ], } USER_NAME = 'fake_user' PORT = 'e0a' VLAN = '1001' VLAN_PORT = 'e0a-1001' IP_ADDRESS = '10.10.10.10' NETMASK = '255.255.255.0' NET_ALLOCATION_ID = 'fake_allocation_id' LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s' LIF_NAME = LIF_NAME_TEMPLATE % {'net_allocation_id': NET_ALLOCATION_ID} IPSPACE = 'fake_ipspace' BROADCAST_DOMAIN = 'fake_domain' MTU = 9000 EMS_MESSAGE = { 'computer-name': 'fake_host', 'event-id': '0', 'event-source': 'fake driver', 'app-version': 'fake app version', 'category': 'fake category', 'event-description': 'fake description', 'log-level': '6', 'auto-support': 'false', } NO_RECORDS_RESPONSE = etree.XML(""" <results status="passed"> <num-records>0</num-records> </results> """) PASSED_RESPONSE = etree.XML(""" <results status="passed" /> """) VSERVER_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <vserver-info> <vserver-name>%(fake_vserver)s</vserver-name> </vserver-info> </attributes-list> <num-records>1</num-records> </results> """ % {'fake_vserver': VSERVER_NAME}) VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <vserver-info> <root-volume>%(root_volume)s</root-volume> <vserver-name>%(fake_vserver)s</vserver-name> </vserver-info> </attributes-list> <num-records>1</num-records> </results> """ % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME}) VSERVER_GET_RESPONSE = etree.XML(""" <results status="passed"> <attributes> <vserver-info> <aggr-list> <aggr-name>%(aggr1)s</aggr-name> <aggr-name>%(aggr2)s</aggr-name> </aggr-list> <vserver-aggr-info-list> <vserver-aggr-info> <aggr-availsize>45678592</aggr-availsize> <aggr-name>%(aggr1)s</aggr-name> </vserver-aggr-info> <vserver-aggr-info> <aggr-availsize>6448431104</aggr-availsize> <aggr-name>%(aggr2)s</aggr-name> </vserver-aggr-info> 
</vserver-aggr-info-list> <vserver-name>%(vserver)s</vserver-name> </vserver-info> </attributes> </results> """ % { 'vserver': VSERVER_NAME, 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) VSERVER_DATA_LIST_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <vserver-info> <vserver-name>%(vserver)s</vserver-name> <vserver-type>data</vserver-type> </vserver-info> </attributes-list> <num-records>1</num-records> </results> """ % {'vserver': VSERVER_NAME}) VSERVER_AGGREGATES = { SHARE_AGGREGATE_NAMES[0]: { 'available': 45678592, }, SHARE_AGGREGATE_NAMES[1]: { 'available': 6448431104, }, } VSERVER_GET_RESPONSE_NO_AGGREGATES = etree.XML(""" <results status="passed"> <attributes> <vserver-info> <vserver-name>%(vserver)s</vserver-name> </vserver-info> </attributes> </results> """ % {'vserver': VSERVER_NAME}) ONTAPI_VERSION_RESPONSE = etree.XML(""" <results status="passed"> <major-version>1</major-version> <minor-version>19</minor-version> </results> """) LICENSE_V2_LIST_INFO_RESPONSE = etree.XML(""" <results status="passed"> <licenses> <license-v2-info> <customer-id>none</customer-id> <description>Cluster Base License</description> <legacy>false</legacy> <owner>cluster3</owner> <package>base</package> <serial-number>1-80-000008</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>NFS License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>nfs</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>CIFS License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>cifs</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>iSCSI License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>iscsi</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>FCP License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>fcp</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>SnapRestore License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>snaprestore</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>SnapMirror License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>snapmirror</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>FlexClone License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>flexclone</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> <license-v2-info> <customer-id>none</customer-id> <description>SnapVault License</description> <legacy>false</legacy> <owner>cluster3-01</owner> <package>snapvault</package> <serial-number>1-81-0000000000000004082368507</serial-number> <type>license</type> </license-v2-info> </licenses> </results> """) LICENSES = ( 
'base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror', 'snaprestore', 'snapvault' ) VOLUME_COUNT_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <volume-attributes> <volume-id-attributes> <name>vol0</name> <owning-vserver-name>cluster3-01</owning-vserver-name> </volume-id-attributes> </volume-attributes> <volume-attributes> <volume-id-attributes> <name>%(root_volume)s</name> <owning-vserver-name>%(fake_vserver)s</owning-vserver-name> </volume-id-attributes> </volume-attributes> </attributes-list> <num-records>2</num-records> </results> """ % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME}) CIFS_SECURITY_SERVICE = { 'type': 'active_directory', 'password': 'fake_password', 'user': 'fake_user', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', } LDAP_SECURITY_SERVICE = { 'type': 'ldap', 'password': 'fake_password', 'server': 'fake_server', 'id': 'fake_id', } KERBEROS_SECURITY_SERVICE = { 'type': 'kerberos', 'password': 'fake_password', 'user': 'fake_user', 'server': 'fake_server', 'id': 'fake_id', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', } KERBEROS_SERVICE_PRINCIPAL_NAME = 'nfs/fake-vserver.fake_domain@FAKE_DOMAIN' INVALID_SECURITY_SERVICE = { 'type': 'fake', } SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <node-details-info> <node>%s</node> </node-details-info> </attributes-list> <num-records>1</num-records> </results> """ % NODE_NAME) NET_PORT_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <net-port-info> <administrative-duplex>full</administrative-duplex> <administrative-flowcontrol>full</administrative-flowcontrol> <administrative-speed>auto</administrative-speed> <is-administrative-auto-negotiate>true</is-administrative-auto-negotiate> <is-administrative-up>true</is-administrative-up> <is-operational-auto-negotiate>true</is-operational-auto-negotiate> <link-status>up</link-status> <mac-address>00:0c:29:fc:04:d9</mac-address> <mtu>1500</mtu> <node>%(node_name)s</node> <operational-duplex>full</operational-duplex> <operational-flowcontrol>none</operational-flowcontrol> <operational-speed>10</operational-speed> <port>e0a</port> <port-type>physical</port-type> <role>data</role> </net-port-info> <net-port-info> <administrative-duplex>full</administrative-duplex> <administrative-flowcontrol>full</administrative-flowcontrol> <administrative-speed>auto</administrative-speed> <is-administrative-auto-negotiate>true</is-administrative-auto-negotiate> <is-administrative-up>true</is-administrative-up> <is-operational-auto-negotiate>true</is-operational-auto-negotiate> <link-status>up</link-status> <mac-address>00:0c:29:fc:04:e3</mac-address> <mtu>1500</mtu> <node>%(node_name)s</node> <operational-duplex>full</operational-duplex> <operational-flowcontrol>none</operational-flowcontrol> <operational-speed>100</operational-speed> <port>e0b</port> <port-type>physical</port-type> <role>data</role> </net-port-info> <net-port-info> <administrative-duplex>full</administrative-duplex> <administrative-flowcontrol>full</administrative-flowcontrol> <administrative-speed>auto</administrative-speed> <is-administrative-auto-negotiate>true</is-administrative-auto-negotiate> <is-administrative-up>true</is-administrative-up> <is-operational-auto-negotiate>true</is-operational-auto-negotiate> <link-status>up</link-status> <mac-address>00:0c:29:fc:04:ed</mac-address> <mtu>1500</mtu> <node>%(node_name)s</node> <operational-duplex>full</operational-duplex> 
<operational-flowcontrol>none</operational-flowcontrol> <operational-speed>1000</operational-speed> <port>e0c</port> <port-type>physical</port-type> <role>data</role> </net-port-info> <net-port-info> <administrative-duplex>full</administrative-duplex> <administrative-flowcontrol>full</administrative-flowcontrol> <administrative-speed>auto</administrative-speed> <is-administrative-auto-negotiate>true</is-administrative-auto-negotiate> <is-administrative-up>true</is-administrative-up> <is-operational-auto-negotiate>true</is-operational-auto-negotiate> <link-status>up</link-status> <mac-address>00:0c:29:fc:04:f7</mac-address> <mtu>1500</mtu> <node>%(node_name)s</node> <operational-duplex>full</operational-duplex> <operational-flowcontrol>none</operational-flowcontrol> <operational-speed>10000</operational-speed> <port>e0d</port> <port-type>physical</port-type> <role>data</role> </net-port-info> </attributes-list> <num-records>4</num-records> </results> """ % {'node_name': NODE_NAME}) SPEED_SORTED_PORTS = ( {'node': NODE_NAME, 'port': 'e0d', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'e0c', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'e0b', 'speed': '100'}, {'node': NODE_NAME, 'port': 'e0a', 'speed': '10'}, ) PORT_NAMES = ('e0a', 'e0b', 'e0c', 'e0d') SPEED_SORTED_PORT_NAMES = ('e0d', 'e0c', 'e0b', 'e0a') UNSORTED_PORTS_ALL_SPEEDS = ( {'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'}, {'node': NODE_NAME, 'port': 'port3', 'speed': '100'}, {'node': NODE_NAME, 'port': 'port1', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'port4', 'speed': '10'}, {'node': NODE_NAME, 'port': 'port7'}, {'node': NODE_NAME, 'port': 'port2', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'}, ) SORTED_PORTS_ALL_SPEEDS = ( {'node': NODE_NAME, 'port': 'port1', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'port2', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'port3', 'speed': '100'}, {'node': NODE_NAME, 'port': 'port4', 'speed': '10'}, {'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'}, {'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'}, {'node': NODE_NAME, 'port': 'port7'}, ) NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <net-port-info> <broadcast-domain>%(domain)s</broadcast-domain> <node>%(node)s</node> <port>%(port)s</port> </net-port-info> </attributes-list> <num-records>1</num-records> </results> """ % {'domain': BROADCAST_DOMAIN, 'node': NODE_NAME, 'port': PORT}) NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <net-port-info> <node>%(node)s</node> <port>%(port)s</port> </net-port-info> </attributes-list> <num-records>1</num-records> </results> """ % {'node': NODE_NAME, 'port': PORT}) NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <net-port-broadcast-domain-info> <broadcast-domain>%(domain)s</broadcast-domain> <ipspace>%(ipspace)s</ipspace> </net-port-broadcast-domain-info> </attributes-list> <num-records>1</num-records> </results> """ % {'domain': BROADCAST_DOMAIN, 'ipspace': IPSPACE}) NET_INTERFACE_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <net-interface-info> <address>192.168.228.42</address> <address-family>ipv4</address-family> <administrative-status>up</administrative-status> <current-node>%(node)s</current-node> <current-port>e0c</current-port> <data-protocols> <data-protocol>none</data-protocol> </data-protocols> <dns-domain-name>none</dns-domain-name> 
<failover-group>system-defined</failover-group> <failover-policy>disabled</failover-policy> <firewall-policy>mgmt</firewall-policy> <home-node>%(node)s</home-node> <home-port>e0c</home-port> <interface-name>cluster_mgmt</interface-name> <is-auto-revert>true</is-auto-revert> <is-home>true</is-home> <lif-uuid>d3230112-7524-11e4-8608-123478563412</lif-uuid> <listen-for-dns-query>false</listen-for-dns-query> <netmask>%(netmask)s</netmask> <netmask-length>24</netmask-length> <operational-status>up</operational-status> <role>cluster_mgmt</role> <routing-group-name>c192.168.228.0/24</routing-group-name> <use-failover-group>system_defined</use-failover-group> <vserver>cluster3</vserver> </net-interface-info> <net-interface-info> <address>192.168.228.43</address> <address-family>ipv4</address-family> <administrative-status>up</administrative-status> <current-node>%(node)s</current-node> <current-port>e0d</current-port> <dns-domain-name>none</dns-domain-name> <failover-group>system-defined</failover-group> <failover-policy>nextavail</failover-policy> <firewall-policy>mgmt</firewall-policy> <home-node>%(node)s</home-node> <home-port>e0d</home-port> <interface-name>mgmt1</interface-name> <is-auto-revert>true</is-auto-revert> <is-home>true</is-home> <lif-uuid>0ccc57cc-7525-11e4-8608-123478563412</lif-uuid> <listen-for-dns-query>false</listen-for-dns-query> <netmask>%(netmask)s</netmask> <netmask-length>24</netmask-length> <operational-status>up</operational-status> <role>node_mgmt</role> <routing-group-name>n192.168.228.0/24</routing-group-name> <use-failover-group>system_defined</use-failover-group> <vserver>cluster3-01</vserver> </net-interface-info> <net-interface-info> <address>%(address)s</address> <address-family>ipv4</address-family> <administrative-status>up</administrative-status> <current-node>%(node)s</current-node> <current-port>%(vlan)s</current-port> <data-protocols> <data-protocol>nfs</data-protocol> <data-protocol>cifs</data-protocol> </data-protocols> <dns-domain-name>none</dns-domain-name> <failover-group>system-defined</failover-group> <failover-policy>nextavail</failover-policy> <firewall-policy>data</firewall-policy> <home-node>%(node)s</home-node> <home-port>%(vlan)s</home-port> <interface-name>%(lif)s</interface-name> <is-auto-revert>false</is-auto-revert> <is-home>true</is-home> <lif-uuid>db4d91b6-95d9-11e4-8608-123478563412</lif-uuid> <listen-for-dns-query>false</listen-for-dns-query> <netmask>%(netmask)s</netmask> <netmask-length>24</netmask-length> <operational-status>up</operational-status> <role>data</role> <routing-group-name>d10.0.0.0/24</routing-group-name> <use-failover-group>system_defined</use-failover-group> <vserver>%(vserver)s</vserver> </net-interface-info> </attributes-list> <num-records>3</num-records> </results> """ % { 'lif': LIF_NAME, 'vserver': VSERVER_NAME, 'node': NODE_NAME, 'address': IP_ADDRESS, 'netmask': NETMASK, 'vlan': VLAN_PORT, }) LIF_NAMES = ('cluster_mgmt', 'mgmt1', LIF_NAME) NET_INTERFACE_GET_ITER_RESPONSE_NFS = etree.XML(""" <results status="passed"> <attributes-list> <net-interface-info> <address>%(address)s</address> <address-family>ipv4</address-family> <administrative-status>up</administrative-status> <current-node>%(node)s</current-node> <current-port>%(vlan)s</current-port> <data-protocols> <data-protocol>nfs</data-protocol> <data-protocol>cifs</data-protocol> </data-protocols> <dns-domain-name>none</dns-domain-name> <failover-group>system-defined</failover-group> <failover-policy>nextavail</failover-policy> 
<firewall-policy>data</firewall-policy> <home-node>%(node)s</home-node> <home-port>%(vlan)s</home-port> <interface-name>%(lif)s</interface-name> <is-auto-revert>false</is-auto-revert> <is-home>true</is-home> <lif-uuid>db4d91b6-95d9-11e4-8608-123478563412</lif-uuid> <listen-for-dns-query>false</listen-for-dns-query> <netmask>%(netmask)s</netmask> <netmask-length>24</netmask-length> <operational-status>up</operational-status> <role>data</role> <routing-group-name>d10.0.0.0/24</routing-group-name> <use-failover-group>system_defined</use-failover-group> <vserver>%(vserver)s</vserver> </net-interface-info> </attributes-list> <num-records>1</num-records> </results> """ % { 'lif': LIF_NAME, 'vserver': VSERVER_NAME, 'node': NODE_NAME, 'address': IP_ADDRESS, 'netmask': NETMASK, 'vlan': VLAN_PORT, }) LIFS = ( {'address': '192.168.228.42', 'home-node': NODE_NAME, 'home-port': 'e0c', 'interface-name': 'cluster_mgmt', 'netmask': NETMASK, 'role': 'cluster_mgmt', 'vserver': 'cluster3' }, {'address': '192.168.228.43', 'home-node': NODE_NAME, 'home-port': 'e0d', 'interface-name': 'mgmt1', 'netmask': NETMASK, 'role': 'node_mgmt', 'vserver': 'cluster3-01' }, {'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ) NFS_LIFS = [ {'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ] NET_INTERFACE_GET_ONE_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <net-interface-info> <interface-name>%(lif)s</interface-name> <vserver>%(vserver)s</vserver> </net-interface-info> </attributes-list> <num-records>1</num-records> </results> """ % {'lif': LIF_NAME, 'vserver': VSERVER_NAME}) AGGR_GET_NAMES_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <aggr-attributes> <aggr-raid-attributes> <plexes> <plex-attributes> <plex-name>/%(aggr1)s/plex0</plex-name> <raidgroups> <raidgroup-attributes> <raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name> </raidgroup-attributes> </raidgroups> </plex-attributes> </plexes> </aggr-raid-attributes> <aggregate-name>%(aggr1)s</aggregate-name> </aggr-attributes> <aggr-attributes> <aggr-raid-attributes> <plexes> <plex-attributes> <plex-name>/%(aggr2)s/plex0</plex-name> <raidgroups> <raidgroup-attributes> <raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name> </raidgroup-attributes> <raidgroup-attributes> <raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name> </raidgroup-attributes> </raidgroups> </plex-attributes> </plexes> </aggr-raid-attributes> <aggregate-name>%(aggr2)s</aggregate-name> </aggr-attributes> </attributes-list> <num-records>2</num-records> </results> """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_SPACE_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <aggr-attributes> <aggr-raid-attributes> <plexes> <plex-attributes> <plex-name>/%(aggr1)s/plex0</plex-name> <raidgroups> <raidgroup-attributes> <raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name> </raidgroup-attributes> </raidgroups> </plex-attributes> </plexes> </aggr-raid-attributes> <aggr-space-attributes> <size-available>45670400</size-available> <size-total>943718400</size-total> <size-used>898048000</size-used> </aggr-space-attributes> <aggregate-name>%(aggr1)s</aggregate-name> </aggr-attributes> <aggr-attributes> <aggr-raid-attributes> <plexes> <plex-attributes> <plex-name>/%(aggr2)s/plex0</plex-name> 
<raidgroups> <raidgroup-attributes> <raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name> </raidgroup-attributes> <raidgroup-attributes> <raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name> </raidgroup-attributes> </raidgroups> </plex-attributes> </plexes> </aggr-raid-attributes> <aggr-space-attributes> <size-available>4267659264</size-available> <size-total>7549747200</size-total> <size-used>3282087936</size-used> </aggr-space-attributes> <aggregate-name>%(aggr2)s</aggregate-name> </aggr-attributes> </attributes-list> <num-records>2</num-records> </results> """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_NODE_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <aggr-attributes> <aggr-ownership-attributes> <home-name>%(node)s</home-name> </aggr-ownership-attributes> <aggregate-name>%(aggr)s</aggregate-name> </aggr-attributes> </attributes-list> <num-records>1</num-records> </results> """ % { 'aggr': SHARE_AGGREGATE_NAME, 'node': NODE_NAME }) AGGR_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <aggr-attributes> <aggr-64bit-upgrade-attributes> <aggr-status-attributes> <is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress> </aggr-status-attributes> </aggr-64bit-upgrade-attributes> <aggr-fs-attributes> <block-type>64_bit</block-type> <fsid>1758646411</fsid> <type>aggr</type> </aggr-fs-attributes> <aggr-inode-attributes> <files-private-used>512</files-private-used> <files-total>30384</files-total> <files-used>96</files-used> <inodefile-private-capacity>30384</inodefile-private-capacity> <inodefile-public-capacity>30384</inodefile-public-capacity> <maxfiles-available>30384</maxfiles-available> <maxfiles-possible>243191</maxfiles-possible> <maxfiles-used>96</maxfiles-used> <percent-inode-used-capacity>0</percent-inode-used-capacity> </aggr-inode-attributes> <aggr-ownership-attributes> <home-id>4082368507</home-id> <home-name>cluster3-01</home-name> <owner-id>4082368507</owner-id> <owner-name>cluster3-01</owner-name> </aggr-ownership-attributes> <aggr-performance-attributes> <free-space-realloc>off</free-space-realloc> <max-write-alloc-blocks>0</max-write-alloc-blocks> </aggr-performance-attributes> <aggr-raid-attributes> <checksum-status>active</checksum-status> <checksum-style>block</checksum-style> <disk-count>3</disk-count> <ha-policy>cfo</ha-policy> <has-local-root>true</has-local-root> <has-partner-root>false</has-partner-root> <is-checksum-enabled>true</is-checksum-enabled> <is-hybrid>false</is-hybrid> <is-hybrid-enabled>false</is-hybrid-enabled> <is-inconsistent>false</is-inconsistent> <mirror-status>unmirrored</mirror-status> <mount-state>online</mount-state> <plex-count>1</plex-count> <plexes> <plex-attributes> <is-online>true</is-online> <is-resyncing>false</is-resyncing> <plex-name>/%(aggr1)s/plex0</plex-name> <plex-status>normal,active</plex-status> <raidgroups> <raidgroup-attributes> <checksum-style>block</checksum-style> <is-cache-tier>false</is-cache-tier> <is-recomputing-parity>false</is-recomputing-parity> <is-reconstructing>false</is-reconstructing> <raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name> <recomputing-parity-percentage>0</recomputing-parity-percentage> <reconstruction-percentage>0</reconstruction-percentage> </raidgroup-attributes> </raidgroups> <resyncing-percentage>0</resyncing-percentage> </plex-attributes> </plexes> <raid-lost-write-state>on</raid-lost-write-state> <raid-size>16</raid-size> <raid-status>raid_dp, normal</raid-status> 
<raid-type>raid_dp</raid-type> <state>online</state> </aggr-raid-attributes> <aggr-snaplock-attributes> <is-snaplock>false</is-snaplock> </aggr-snaplock-attributes> <aggr-snapshot-attributes> <files-total>0</files-total> <files-used>0</files-used> <is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled> <is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled> <maxfiles-available>0</maxfiles-available> <maxfiles-possible>0</maxfiles-possible> <maxfiles-used>0</maxfiles-used> <percent-inode-used-capacity>0</percent-inode-used-capacity> <percent-used-capacity>0</percent-used-capacity> <size-available>0</size-available> <size-total>0</size-total> <size-used>0</size-used> <snapshot-reserve-percent>0</snapshot-reserve-percent> </aggr-snapshot-attributes> <aggr-space-attributes> <aggregate-metadata>245760</aggregate-metadata> <hybrid-cache-size-total>0</hybrid-cache-size-total> <percent-used-capacity>95</percent-used-capacity> <size-available>45670400</size-available> <size-total>943718400</size-total> <size-used>898048000</size-used> <total-reserved-space>0</total-reserved-space> <used-including-snapshot-reserve>898048000</used-including-snapshot-reserve> <volume-footprints>897802240</volume-footprints> </aggr-space-attributes> <aggr-volume-count-attributes> <flexvol-count>1</flexvol-count> <flexvol-count-collective>0</flexvol-count-collective> <flexvol-count-striped>0</flexvol-count-striped> </aggr-volume-count-attributes> <aggregate-name>%(aggr1)s</aggregate-name> <aggregate-uuid>15863632-ea49-49a8-9c88-2bd2d57c6d7a</aggregate-uuid> <nodes> <node-name>cluster3-01</node-name> </nodes> <striping-type>unknown</striping-type> </aggr-attributes> <aggr-attributes> <aggr-64bit-upgrade-attributes> <aggr-status-attributes> <is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress> </aggr-status-attributes> </aggr-64bit-upgrade-attributes> <aggr-fs-attributes> <block-type>64_bit</block-type> <fsid>706602229</fsid> <type>aggr</type> </aggr-fs-attributes> <aggr-inode-attributes> <files-private-used>528</files-private-used> <files-total>31142</files-total> <files-used>96</files-used> <inodefile-private-capacity>31142</inodefile-private-capacity> <inodefile-public-capacity>31142</inodefile-public-capacity> <maxfiles-available>31142</maxfiles-available> <maxfiles-possible>1945584</maxfiles-possible> <maxfiles-used>96</maxfiles-used> <percent-inode-used-capacity>0</percent-inode-used-capacity> </aggr-inode-attributes> <aggr-ownership-attributes> <home-id>4082368507</home-id> <home-name>cluster3-01</home-name> <owner-id>4082368507</owner-id> <owner-name>cluster3-01</owner-name> </aggr-ownership-attributes> <aggr-performance-attributes> <free-space-realloc>off</free-space-realloc> <max-write-alloc-blocks>0</max-write-alloc-blocks> </aggr-performance-attributes> <aggr-raid-attributes> <checksum-status>active</checksum-status> <checksum-style>block</checksum-style> <disk-count>10</disk-count> <ha-policy>sfo</ha-policy> <has-local-root>false</has-local-root> <has-partner-root>false</has-partner-root> <is-checksum-enabled>true</is-checksum-enabled> <is-hybrid>false</is-hybrid> <is-hybrid-enabled>false</is-hybrid-enabled> <is-inconsistent>false</is-inconsistent> <mirror-status>unmirrored</mirror-status> <mount-state>online</mount-state> <plex-count>1</plex-count> <plexes> <plex-attributes> <is-online>true</is-online> <is-resyncing>false</is-resyncing> <plex-name>/%(aggr2)s/plex0</plex-name> <plex-status>normal,active</plex-status> <raidgroups> <raidgroup-attributes> 
<checksum-style>block</checksum-style> <is-cache-tier>false</is-cache-tier> <is-recomputing-parity>false</is-recomputing-parity> <is-reconstructing>false</is-reconstructing> <raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name> <recomputing-parity-percentage>0</recomputing-parity-percentage> <reconstruction-percentage>0</reconstruction-percentage> </raidgroup-attributes> <raidgroup-attributes> <checksum-style>block</checksum-style> <is-cache-tier>false</is-cache-tier> <is-recomputing-parity>false</is-recomputing-parity> <is-reconstructing>false</is-reconstructing> <raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name> <recomputing-parity-percentage>0</recomputing-parity-percentage> <reconstruction-percentage>0</reconstruction-percentage> </raidgroup-attributes> </raidgroups> <resyncing-percentage>0</resyncing-percentage> </plex-attributes> </plexes> <raid-lost-write-state>on</raid-lost-write-state> <raid-size>8</raid-size> <raid-status>raid4, normal</raid-status> <raid-type>raid4</raid-type> <state>online</state> </aggr-raid-attributes> <aggr-snaplock-attributes> <is-snaplock>false</is-snaplock> </aggr-snaplock-attributes> <aggr-snapshot-attributes> <files-total>0</files-total> <files-used>0</files-used> <is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled> <is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled> <maxfiles-available>0</maxfiles-available> <maxfiles-possible>0</maxfiles-possible> <maxfiles-used>0</maxfiles-used> <percent-inode-used-capacity>0</percent-inode-used-capacity> <percent-used-capacity>0</percent-used-capacity> <size-available>0</size-available> <size-total>0</size-total> <size-used>0</size-used> <snapshot-reserve-percent>0</snapshot-reserve-percent> </aggr-snapshot-attributes> <aggr-space-attributes> <aggregate-metadata>425984</aggregate-metadata> <hybrid-cache-size-total>0</hybrid-cache-size-total> <percent-used-capacity>15</percent-used-capacity> <size-available>6448431104</size-available> <size-total>7549747200</size-total> <size-used>1101316096</size-used> <total-reserved-space>0</total-reserved-space> <used-including-snapshot-reserve>1101316096</used-including-snapshot-reserve> <volume-footprints>1100890112</volume-footprints> </aggr-space-attributes> <aggr-volume-count-attributes> <flexvol-count>2</flexvol-count> <flexvol-count-collective>0</flexvol-count-collective> <flexvol-count-striped>0</flexvol-count-striped> </aggr-volume-count-attributes> <aggregate-name>%(aggr2)s</aggregate-name> <aggregate-uuid>2a741934-1aaf-42dd-93ca-aaf231be108a</aggregate-uuid> <nodes> <node-name>cluster3-01</node-name> </nodes> <striping-type>not_striped</striping-type> </aggr-attributes> </attributes-list> <num-records>2</num-records> </results> """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) VOLUME_GET_NAME_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <volume-attributes> <volume-id-attributes> <name>%(volume)s</name> <owning-vserver-name>%(vserver)s</owning-vserver-name> </volume-id-attributes> </volume-attributes> </attributes-list> <num-records>1</num-records> </results> """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) VOLUME_GET_VOLUME_PATH_RESPONSE = etree.XML(""" <results status="passed"> <junction>/%(volume)s</junction> </results> """ % {'volume': SHARE_NAME}) VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE = etree.XML(""" <results status="passed"> <junction>\\%(volume)s</junction> </results> """ % {'volume': SHARE_NAME}) VOLUME_JUNCTION_PATH = '/' + SHARE_NAME VOLUME_JUNCTION_PATH_CIFS 
= '\\' + SHARE_NAME VOLUME_MODIFY_ITER_RESPONSE = etree.XML(""" <results status="passed"> <failure-list /> <num-failed>0</num-failed> <num-succeeded>1</num-succeeded> <success-list> <volume-modify-iter-info> <volume-key> <volume-attributes> <volume-id-attributes> <name>%(volume)s</name> <owning-vserver-name>%(vserver)s</owning-vserver-name> </volume-id-attributes> </volume-attributes> </volume-key> </volume-modify-iter-info> </success-list> </results> """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) VOLUME_MODIFY_ITER_ERROR_RESPONSE = etree.XML(""" <results status="passed"> <failure-list> <volume-modify-iter-info> <error-code>160</error-code> <error-message>Unable to set volume attribute "size"</error-message> <volume-key> <volume-attributes> <volume-id-attributes> <name>%(volume)s</name> <owning-vserver-name>%(vserver)s</owning-vserver-name> </volume-id-attributes> </volume-attributes> </volume-key> </volume-modify-iter-info> </failure-list> <num-failed>1</num-failed> <num-succeeded>0</num-succeeded> </results> """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <snapshot-info> <busy>false</busy> <name>%(snap)s</name> <volume>%(volume)s</volume> <vserver>%(vserver)s</vserver> </snapshot-info> </attributes-list> <num-records>1</num-records> </results> """ % {'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <snapshot-info> <busy>true</busy> <name>%(snap)s</name> <volume>%(volume)s</volume> <vserver>%(vserver)s</vserver> <snapshot-owners-list> <snapshot-owner> <owner>volume clone</owner> </snapshot-owner> </snapshot-owners-list> </snapshot-info> </attributes-list> <num-records>1</num-records> </results> """ % {'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <snapshot-info> <busy>false</busy> <name>%(snap)s</name> <volume>%(volume)s</volume> <vserver>%(vserver)s</vserver> </snapshot-info> <snapshot-info> <busy>false</busy> <name>%(snap)s</name> <volume>%(root_volume)s</volume> <vserver>%(admin_vserver)s</vserver> </snapshot-info> </attributes-list> <num-records>1</num-records> </results> """ % { 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, 'root_volume': ROOT_VOLUME_NAME, 'admin_vserver': ADMIN_VSERVER_NAME, }) SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE = etree.XML(""" <results status="passed"> <num-records>0</num-records> <volume-errors> <volume-error> <errno>13023</errno> <name>%(volume)s</name> <reason>Unable to get information for Snapshot copies of volume \ "%(volume)s" on Vserver "%(vserver)s". 
Reason: Volume not online.</reason> <vserver>%(vserver)s</vserver> </volume-error> </volume-errors> </results> """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE = etree.XML(""" <results status="passed"> <num-records>0</num-records> <volume-errors> <volume-error> <errno>99999</errno> <name>%(volume)s</name> <reason>Unable to get information for Snapshot copies of volume \ "%(volume)s" on Vserver "%(vserver)s".</reason> <vserver>%(vserver)s</vserver> </volume-error> </volume-errors> </results> """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_MULTIDELETE_ERROR_RESPONSE = etree.XML(""" <results status="passed"> <volume-errors> <volume-error> <errno>13021</errno> <name>%(volume)s</name> <reason>No such snapshot.</reason> </volume-error> </volume-errors> </results> """ % {'volume': SHARE_NAME}) NFS_EXPORT_RULES = ('10.10.10.10', '10.10.10.20') NFS_EXPORTFS_LIST_RULES_2_NO_RULES_RESPONSE = etree.XML(""" <results status="passed"> <rules /> </results> """) NFS_EXPORTFS_LIST_RULES_2_RESPONSE = etree.XML(""" <results status="passed"> <rules> <exports-rule-info-2> <pathname>%(path)s</pathname> <security-rules> <security-rule-info> <anon>65534</anon> <nosuid>false</nosuid> <read-only> <exports-hostname-info> <name>%(host1)s</name> </exports-hostname-info> <exports-hostname-info> <name>%(host2)s</name> </exports-hostname-info> </read-only> <read-write> <exports-hostname-info> <name>%(host1)s</name> </exports-hostname-info> <exports-hostname-info> <name>%(host2)s</name> </exports-hostname-info> </read-write> <root> <exports-hostname-info> <name>%(host1)s</name> </exports-hostname-info> <exports-hostname-info> <name>%(host2)s</name> </exports-hostname-info> </root> <sec-flavor> <sec-flavor-info> <flavor>sys</flavor> </sec-flavor-info> </sec-flavor> </security-rule-info> </security-rules> </exports-rule-info-2> </rules> </results> """ % { 'path': VOLUME_JUNCTION_PATH, 'host1': NFS_EXPORT_RULES[0], 'host2': NFS_EXPORT_RULES[1], }) AGGR_GET_RAID_TYPE_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <aggr-attributes> <aggr-raid-attributes> <plexes> <plex-attributes> <plex-name>/%(aggr1)s/plex0</plex-name> <raidgroups> <raidgroup-attributes> <raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name> </raidgroup-attributes> </raidgroups> </plex-attributes> </plexes> <raid-type>%(raid_type1)s</raid-type> </aggr-raid-attributes> <aggregate-name>%(aggr1)s</aggregate-name> </aggr-attributes> <aggr-attributes> <aggr-raid-attributes> <plexes> <plex-attributes> <plex-name>/%(aggr2)s/plex0</plex-name> <raidgroups> <raidgroup-attributes> <raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name> </raidgroup-attributes> <raidgroup-attributes> <raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name> </raidgroup-attributes> </raidgroups> </plex-attributes> </plexes> <raid-type>%(raid_type2)s</raid-type> </aggr-raid-attributes> <aggregate-name>%(aggr2)s</aggregate-name> </aggr-attributes> </attributes-list> <num-records>2</num-records> </results> """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], 'raid_type1': SHARE_AGGREGATE_RAID_TYPES[0], 'raid_type2': SHARE_AGGREGATE_RAID_TYPES[1] }) STORAGE_DISK_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <storage-disk-info> <disk-name>cluster3-01:v5.19</disk-name> <disk-raid-info> <effective-disk-type>%s</effective-disk-type> </disk-raid-info> </storage-disk-info> </attributes-list> <num-records>1</num-records> </results> """ % SHARE_AGGREGATE_DISK_TYPE) 
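# Sketch of how a test might consume the error fixtures above: the responses
# are plain lxml elements, so findtext() suffices to pull out scalar fields
# for assertions. The helper name below is hypothetical.

def _first_volume_errno(response):
    # returns the <errno> text of the first volume-error, or None when absent
    return response.findtext('.//volume-error/errno')

assert _first_volume_errno(SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE) == '13023'
assert _first_volume_errno(SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE) == '99999'
assert _first_volume_errno(SNAPSHOT_MULTIDELETE_ERROR_RESPONSE) == '13021'
assert STORAGE_DISK_GET_ITER_RESPONSE.findtext(
    './/effective-disk-type') == SHARE_AGGREGATE_DISK_TYPE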
GET_AGGREGATE_FOR_VOLUME_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <volume-attributes> <volume-id-attributes> <containing-aggregate-name>%(aggr)s</containing-aggregate-name> <name>%(share)s</name> <owning-vserver-name>os_aa666789-5576-4835-87b7-868069856459</owning-vserver-name> </volume-id-attributes> </volume-attributes> </attributes-list> <num-records>1</num-records> </results> """ % { 'aggr': SHARE_AGGREGATE_NAME, 'share': SHARE_NAME }) EXPORT_RULE_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <export-rule-info> <client-match>%(rule)s</client-match> <policy-name>%(policy)s</policy-name> <rule-index>3</rule-index> <vserver-name>manila_svm</vserver-name> </export-rule-info> <export-rule-info> <client-match>%(rule)s</client-match> <policy-name>%(policy)s</policy-name> <rule-index>1</rule-index> <vserver-name>manila_svm</vserver-name> </export-rule-info> </attributes-list> <num-records>2</num-records> </results> """ % {'policy': EXPORT_POLICY_NAME, 'rule': IP_ADDRESS}) VOLUME_GET_EXPORT_POLICY_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <volume-attributes> <volume-export-attributes> <policy>%(policy)s</policy> </volume-export-attributes> <volume-id-attributes> <name>%(volume)s</name> <owning-vserver-name>manila_svm</owning-vserver-name> </volume-id-attributes> </volume-attributes> </attributes-list> <num-records>1</num-records> </results> """ % {'policy': EXPORT_POLICY_NAME, 'volume': SHARE_NAME}) DELETED_EXPORT_POLICY_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <export-policy-info> <policy-name>%(policy1)s</policy-name> <vserver>%(vserver)s</vserver> </export-policy-info> <export-policy-info> <policy-name>%(policy2)s</policy-name> <vserver>%(vserver)s</vserver> </export-policy-info> <export-policy-info> <policy-name>%(policy3)s</policy-name> <vserver>%(vserver2)s</vserver> </export-policy-info> </attributes-list> <num-records>2</num-records> </results> """ % { 'vserver': VSERVER_NAME, 'vserver2': VSERVER_NAME_2, 'policy1': DELETED_EXPORT_POLICIES[VSERVER_NAME][0], 'policy2': DELETED_EXPORT_POLICIES[VSERVER_NAME][1], 'policy3': DELETED_EXPORT_POLICIES[VSERVER_NAME_2][0], }) LUN_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <lun-info> <path>/vol/%(volume)s/fakelun</path> <qtree /> <volume>%(volume)s</volume> <vserver>%(vserver)s</vserver> </lun-info> </attributes-list> <num-records>1</num-records> </results> """ % { 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, }) VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <volume-attributes> <volume-id-attributes> <name>fake_volume</name> <owning-vserver-name>test</owning-vserver-name> </volume-id-attributes> </volume-attributes> </attributes-list> <num-records>1</num-records> </results> """) VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <volume-attributes> <volume-id-attributes> <containing-aggregate-name>%(aggr)s</containing-aggregate-name> <junction-path>/%(volume)s</junction-path> <name>%(volume)s</name> <owning-vserver-name>%(vserver)s</owning-vserver-name> <style>flex</style> <type>rw</type> </volume-id-attributes> <volume-space-attributes> <size>%(size)s</size> </volume-space-attributes> </volume-attributes> </attributes-list> <num-records>1</num-records> </results> """ % { 'aggr': SHARE_AGGREGATE_NAME, 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, 'size': SHARE_SIZE, }) 
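# A similar hypothetical helper for the manage-existing fixture above,
# flattening the attributes a test typically asserts on into a dict.

def _parse_volume_to_manage(response):
    attrs = response.find('.//volume-attributes')
    return {
        'name': attrs.findtext('.//name'),
        'aggregate': attrs.findtext('.//containing-aggregate-name'),
        'size': attrs.findtext('.//size'),
    }

assert _parse_volume_to_manage(VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE) == {
    'name': SHARE_NAME,
    'aggregate': SHARE_AGGREGATE_NAME,
    'size': SHARE_SIZE,
}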
SIS_GET_ITER_RESPONSE = etree.XML(""" <results status="passed"> <attributes-list> <sis-status-info> <is-compression-enabled>true</is-compression-enabled> <path>/vol/%(volume)s</path> <state>enabled</state> <vserver>%(vserver)s</vserver> </sis-status-info> </attributes-list> </results> """ % { 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, })
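# Illustrative sketch: in a unit test, canned replies such as SIS_GET_ITER_RESPONSE
# are commonly returned from a mocked "send the API request" method so that the
# parsing logic can be exercised without a real filer. The self-contained test below
# shows that wiring pattern; FakeClient, send_request and get_compression_state are
# hypothetical stand-ins, not the real driver API.
import unittest
from unittest import mock
import xml.etree.ElementTree as ET

SIS_REPLY = """
<results status="passed">
  <attributes-list>
    <sis-status-info>
      <is-compression-enabled>true</is-compression-enabled>
      <state>enabled</state>
    </sis-status-info>
  </attributes-list>
</results>
"""

class FakeClient(object):
    def send_request(self, api_name, api_args=None):
        raise NotImplementedError  # replaced by a mock in the test

    def get_compression_state(self):
        reply = self.send_request('sis-get-iter')
        root = ET.fromstring(reply)
        return root.findtext('.//is-compression-enabled') == 'true'

class CompressionStateTest(unittest.TestCase):
    def test_compression_enabled(self):
        client = FakeClient()
        client.send_request = mock.Mock(return_value=SIS_REPLY)
        self.assertTrue(client.get_compression_state())
        client.send_request.assert_called_once_with('sis-get-iter')

if __name__ == '__main__':
    unittest.main()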
######################################################################### # # # # # copyright 2002 Paul Henry Tremblay # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # # General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program; if not, write to the Free Software # # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA # # 02111-1307 USA # # # # # ######################################################################### import sys, os, tempfile, rtf2xml.copy, re class Colors: """ Change lines with color info from color numbers to the actual color names. """ def __init__(self, in_file, bug_handler, copy = None, run_level = 1 ): """ Required: 'file'--file to parse Optional: 'copy'-- whether to make a copy of result for debugging 'temp_dir' --where to output temporary results (default is directory from which the script is run.) Returns: nothing """ self.__file = in_file self.__copy = copy self.__bug_handler = bug_handler self.__write_to = tempfile.mktemp() self.__run_level = run_level def __initiate_values(self): """ Initiate all values. """ self.__color_dict = {} self.__state = 'before_color_table' self.__state_dict = { 'before_color_table': self.__before_color_func, 'in_color_table' : self.__in_color_func, 'after_color_table' : self.__after_color_func, 'cw<ci<red_______' : self.__default_color_func, 'cw<ci<green_____' : self.__default_color_func, 'cw<ci<blue______' : self.__blue_func, 'tx<nu<__________' : self.__do_nothing_func, } self.__color_string = '#' self.__color_num = 1 self.__line_color_exp = re.compile(r'bdr-color_:(\d+)') # cw<bd<bor-par-to<nu<bdr-hair__|bdr-li-wid:0.50|bdr-sp-wid:1.00|bdr-color_:2 def __before_color_func(self, line): """ Requires: line Returns: nothing Logic: Check to see if the line marks the beginning of the color table. If so, change states. Always print out the line. """ # mi<mk<clrtbl-beg if self.__token_info == 'mi<mk<clrtbl-beg': self.__state = 'in_color_table' self.__write_obj.write(line) def __default_color_func(self, line): """ Requires: line Returns: nothing Logic: get the hex number from the line and add it to the color string. """ hex_num = line[-3:-1] self.__color_string += hex_num def __blue_func(self, line): """ Requires: line Returns: nothing Logic: Get the hex number from the line and add it to the color string. Add a key -> value pair to the color dictionary, with the number as the key, and the hex number as the value. Write an empty tag with the hex number and number as attributes. Add one to the color number. Reset the color string to '#' """ hex_num = line[-3:-1] self.__color_string += hex_num self.__color_dict[self.__color_num] = self.__color_string self.__write_obj.write( 'mi<tg<empty-att_' '<color-in-table<num>%s<value>%s\n' % (self.__color_num, self.__color_string) ) self.__color_num += 1 self.__color_string = '#' def __in_color_func(self, line): """ Requires: line Returns: nothing Logic: Check if the end of the color table has been reached. If so, change the state to after the color table. Othewise, get a function by passing the self.__token_info to the state dictionary. 
""" #mi<mk<clrtbl-beg #cw<ci<red_______<nu<00 if self.__token_info == 'mi<mk<clrtbl-end': self.__state = 'after_color_table' else: action = self.__state_dict.get(self.__token_info) if action == None: sys.stderr.write('in module colors.py\n' 'function is self.__in_color_func\n' 'no action for %s' % self.__token_info ) action(line) def __after_color_func(self, line): """ Check the to see if it contains color info. If it does, extract the number and look up the hex value in the color dictionary. If the color dictionary has no key for the number, print out an error message. Otherwise, print out the line. Added Oct 10, 2003 If the number is 0, that indicates no color """ #cw<ci<font-color<nu<2 if self.__token_info == 'cw<ci<font-color': hex_num = int(line[20:-1]) hex_num = self.__figure_num(hex_num) if hex_num: self.__write_obj.write( 'cw<ci<font-color<nu<%s\n' % hex_num ) elif line[0:5] == 'cw<bd': the_index = line.find('bdr-color_') if the_index > -1: line = re.sub(self.__line_color_exp, self.__sub_from_line_color, line) self.__write_obj.write(line) """ if num == 0: hex_num = 'false' else: hex_num = self.__color_dict.get(num) if hex_num == None: if self.__run_level > 0: sys.stderr.write( 'module is colors.py\n' 'function is self.__after_color_func\n' 'no value in self.__color_dict for key %s\n' % num ) if self.__run_level > 3: sys.stderr.write( 'run level is %s\n' 'Script will now quit\n' % self.__run_level) else: self.__write_obj.write( 'cw<ci<font-color<nu<%s\n' % hex_num ) """ else: self.__write_obj.write(line) # cw<bd<bor-par-to<nu<bdr-hair__|bdr-li-wid:0.50|bdr-sp-wid:1.00|bdr-color_:2 def __sub_from_line_color(self, match_obj): num = match_obj.group(1) try: num = int(num) except ValueError: if self.__run_level > 3: msg = 'can\'t make integer from string\n' raise self.__bug_handler, msg else: return 'bdr-color_:no-value' hex_num = self.__figure_num(num) return_value = 'bdr-color_:%s' % hex_num return return_value def __figure_num(self, num): if num == 0: hex_num = 'false' else: hex_num = self.__color_dict.get(num) if hex_num == None: if self.__run_level > 3: msg = 'no value in self.__color_dict for key %s\n' % num raise self.__bug_hanlder, msg if hex_num == None: hex_num = '0' return hex_num def __do_nothing_func(self, line): """ Bad RTF will have text in the color table """ pass def convert_colors(self): """ Requires: nothing Returns: nothing (changes the original file) Logic: Read one line in at a time. Determine what action to take based on the state. If the state is before the color table, look for the beginning of the color table. If the state is in the color table, create the color dictionary and print out the tags. If the state if afer the color table, look for lines with color info, and substitute the number with the hex number. """ self.__initiate_values() read_obj = open(self.__file, 'r') self.__write_obj = open(self.__write_to, 'w') line_to_read = 1 while line_to_read: line_to_read = read_obj.readline() line = line_to_read self.__token_info = line[:16] action = self.__state_dict.get(self.__state) if action == None: sys.stderr.write('no no matching state in module fonts.py\n') sys.stderr.write(self.__state + '\n') action(line) read_obj.close() self.__write_obj.close() copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler) if self.__copy: copy_obj.copy_file(self.__write_to, "color.data") copy_obj.rename(self.__write_to, self.__file) os.remove(self.__write_to)
""" @file wifi.py """ ## # @addtogroup sanity sanity # @brief This is sanity component # @{ # @addtogroup comm_wifi_connect comm_wifi_connect # @brief This is comm_wifi_connect module # @{ ## import time import os import string from oeqa.utils.helper import shell_cmd_timeout class WiFiFunction(object): """ @class WiFiFunction """ service = "" log = "" def __init__(self, target): self.target = target # un-block software rfkill lock self.target.run('rfkill unblock all') def target_collect_info(self, cmd): """ @fn target_collect_info @param self @param cmd @return """ (status, output) = self.target.run(cmd) self.log = self.log + "\n\n[Debug] Command output --- %s: \n" % cmd self.log = self.log + output def enable_wifi(self): """ @fn enable_wifi @param self @return """ # Enable WiFi (status, output) = self.target.run('connmanctl enable wifi') assert status == 0, "Error messages: %s" % output time.sleep(1) def disable_wifi(self): ''' disable wifi after testing @fn disable_wifi @param self @return ''' (status, output) = self.target.run('connmanctl disable wifi') assert status == 0, "Error messages: %s" % output # sleep some seconds to ensure disable is done time.sleep(2) def scan_wifi(self, ap_type, ssid): """ @fn scan_wifi @param self @param ap_type: hidden or broadcast @return service string of AP """ if (ap_type == "hidden"): ssid = "hidden_managed_psk" elif (ap_type == "hidden-wep"): ssid = "hidden_managed_wep" # Retry 4 times scan if needed retry = 0 while (retry < 4): (status, output) = self.target.run('connmanctl scan wifi') assert status == 0, "Error messages: %s" % output (status, output) = self.target.run("connmanctl services | grep %s" % ssid) retry = retry + 1 if (status == 0): break else: self.target_collect_info("connmanctl services") # Collect info self.target_collect_info("ifconfig") assert status == 0, "Not found hidden AP service" + self.log if "hidden" in ap_type: return output.strip() elif (ap_type == "broadcast"): return output.split(" ")[-1] def connect_wifi(self, ap_type, ssid, pwd): '''connmanctl to connect wifi AP @fn connect_wifi @param self @return ''' target_ip = self.target.ip for i in range(3): service = self.scan_wifi(ap_type, ssid) # Do connection if (ap_type == "broadcast"): exp = os.path.join(os.path.dirname(__file__), "files/wifi_connect.exp") cmd = "expect %s %s %s %s %s" % (exp, target_ip, "connmanctl", service, pwd) elif "hidden" in ap_type: exp = os.path.join(os.path.dirname(__file__), "files/wifi_hidden_connect.exp") cmd = "expect %s %s %s %s %s %s" % (exp, target_ip, "connmanctl", service, ssid, pwd) else: assert False, "ap_type must be broadcast or hidden, check config" # execute connection expect script status, output = shell_cmd_timeout(cmd, timeout=60) if status == 2: break assert status == 2, "Error messages: %s" % output def get_wifi_ipv4(self): ''' Get wifi ipv4 address @fn get_wifi_ipv4 @param self @return ''' time.sleep(3) # Check ip address by ifconfig command wifi_interface = "nothing" (status, wifi_interface) = self.target.run("ifconfig | grep '^wlp\|^wlan' | awk '{print $1}'") (status, output) = self.target.run("ifconfig %s | grep 'inet addr:'" % wifi_interface) self.target_collect_info("ifconfig") assert status == 0, "Error messages: %s" % self.log return output.split()[1].split(':')[1] def wifi_ip_check(self): '''check if the target gets ip address @fn wifi_ip_check @param self @return ''' time.sleep(3) # Check ip address by ifconfig command wifi_interface = "nothing" (status, wifi_interface) = self.target.run("ifconfig | grep '^wlp\|^wlan' 
| awk '{print $1}'") (status, output) = self.target.run("ifconfig %s | grep 'inet addr:'" % wifi_interface) assert status == 0, "Error messages: %s" % output # Collect info self.target_collect_info("ifconfig") assert status == 0, "IP check failed" + self.log def connect_without_password(self, ssid): '''connmanctl to connect wifi AP without password @fn connect_without_password @param self @param ssid: WiFi AP ssid, in the services list already @return ''' self.target.run('connmanctl scan wifi') time.sleep(1) self.target_collect_info('connmanctl services') (status, service) = self.target.run('connmanctl services | grep "%s"' % ssid) time.sleep(1) assert status == 0, "Do not get AP service: %s" % self.log # Directly execute connmanctl to connect AP (status, service) = self.target.run('connmanctl connect %s' % service) time.sleep(10) self.wifi_ip_check() def check_internet_connection(self, url): ''' Check if the target is able to connect to internet by wget @fn check_internet_connection @param self @return ''' # wget internet content self.target.run("rm -f index.html") time.sleep(1) for i in range(3): (status, output) = self.target.run("wget %s" % url, timeout=100) if status == 0: break time.sleep(3) self.target_collect_info("route") assert status == 0, "Error messages: %s" % self.log def execute_connection(self, ap_type, ssid, pwd): '''do a full round of wifi connection without disable @fn execut_connection @param self @param ap_type: must be broadcast or hidden @param ssid @param pwd @return ''' self.enable_wifi() # Use sleep because wifi_enable will trigger auto-connect (to last AP) time.sleep(30) self.connect_wifi(ap_type, ssid, pwd) self.wifi_ip_check() def ipv4_ssh_to(self, ipv4): ''' On main target, ssh to second @fn ipv4_ssh_to @param self @param ipv4: second target ipv4 address @return ''' ssh_key = os.path.join(os.path.dirname(__file__), "../bluetooth/files/ostro_qa_rsa") self.target.copy_to(ssh_key, "/tmp/") self.target.run("chmod 400 /tmp/ostro_qa_rsa") exp = os.path.join(os.path.dirname(__file__), "files/ssh_to.exp") exp_cmd = 'expect %s %s %s' % (exp, self.target.ip, ipv4) (status, output) = shell_cmd_timeout(exp_cmd) assert status == 2, "Error messages: %s" % output def scp_to(self, file_path, ipv4): ''' On main target, scp file to second @fn scp_to @param self @param file_path: the file to be scp to second device @param ipv4: second target ipv4 address @return ''' # This function assumes two devices already get ssh-key exchanged. scp_cmd = 'scp -i /tmp/ostro_qa_rsa %s root@%s:/home/root/' % (file_path, ipv4) (status, output) = self.target.run(scp_cmd, timeout=2000) assert status == 0, "Scp fails: %s" % output ## # @} # @} ##
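# Illustrative sketch (standalone, not a method of WiFiFunction): for a broadcast
# AP, scan_wifi() takes the matching line of "connmanctl services" output and keeps
# the last whitespace-separated token, which is connman's service identifier
# (typically of the form wifi_<device>_<hexssid>_managed_psk). The helper and the
# sample line below are made up just to show that parsing step.
def extract_service_id(services_line):
    """Return the connman service id from one 'connmanctl services' output line."""
    return services_line.split(" ")[-1]

sample = "*AO MyAP                 wifi_0123456789ab_4d794150_managed_psk"
assert extract_service_id(sample) == "wifi_0123456789ab_4d794150_managed_psk"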
import ctypes import socket import threading import time import sys from zeroconf import ServiceBrowser, Zeroconf import logging logger = logging.getLogger('c6dwifi') logger.setLevel(logging.INFO) libgphoto_names = ['libgphoto2.so.6', 'libgphoto2.6.dylib'] class GPhotoError(Exception): def __init__(self, result, message): self.result = result self.message = message def __str__(self): return self.message + ' (' + str(self.result) + ')' class GPhoto2Binder(): def __init__(self): self.gphoto = self.find_gphoto2() self.bind_gphoto() self.GP_CAPTURE_IMAGE = 0 self.GP_CAPTURE_MOVIE = 1 self.GP_CAPTURE_SOUND = 2 self.GP_EVENT_UNKNOWN = 0 self.GP_EVENT_TIMEOUT = 1 self.GP_EVENT_FILE_ADDED = 2 self.GP_EVENT_FOLDER_ADDED = 3 self.GP_EVENT_CAPTURE_COMPLETE = 4 def get_gphoto(self): return self.gphoto @staticmethod def find_gphoto2(): for libgphoto_name in libgphoto_names: gphoto2_candidate = None try: gphoto2_candidate = ctypes.CDLL(libgphoto_name) except OSError: pass if gphoto2_candidate is not None: logger.info('Using {0}'.format(libgphoto_name)) return gphoto2_candidate logger.error('No libgphoto2 found') def bind_gphoto(self): self.gphoto.gp_context_new.restype = ctypes.c_void_p self.gphoto.gp_camera_init.argtypes = [ctypes.c_void_p, ctypes.c_void_p] self.gphoto.gp_context_unref.argtypes = [ctypes.c_void_p] self.gphoto.gp_abilities_list_lookup_model.argtypes = [ctypes.c_void_p, ctypes.c_char_p] self.gphoto.gp_result_as_string.restype = ctypes.c_char_p self.gphoto.gp_log_add_func.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] self.gphoto.gp_setting_set.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] self.gphoto.gp_camera_set_abilities.argtypes = [ctypes.c_void_p, ctypes.Structure] class CameraAbilities(ctypes.Structure): _fields_ = [('model', (ctypes.c_char * 128)), ('data', (ctypes.c_char * 4096))] class CameraFilePath(ctypes.Structure): _fields_ = [('name', (ctypes.c_char * 128)), ('folder', (ctypes.c_char * 1024))] class GPhotoError(Exception): def __init__(self, result, message): self.result = result self.message = message def __str__(self): return self.message + ' (' + str(self.result) + ')' class Common: log_label = 'Common' def log(self, msg, debug=False): logger.error('{0} {1}'.format(self.log_label, msg)) def debug(self, msg): logger.error(msg) def start(self): def run(): self.log('started thread') self.run() self.log('finished thread') self.log('starting thread') self.thread = threading.Thread(target=run) self.thread.start() def join(self, timeout=None): if not self.thread.isAlive(): pass elif timeout: self.thread.join(timeout=timeout) else: self.thread.join() return not self.thread.isAlive() def shutdown(self): pass class PTPIPCamera(Common): log_label = 'PTPIPCamera' def __init__(self, target, guid): self.context = ctypes.c_void_p() # gphoto.gp_context_new() self.target = target self.guid = guid self.handle = ctypes.c_void_p() self.portlist = None self.abilitylist = None self.connected = False self.cached_root = None self.cached_time = 0 self.cache_expiry = 2 # seconds self.gp2binder = GPhoto2Binder() self.gphoto = self.gp2binder.get_gphoto() def gphoto_check(self, result): if result < 0: message = self.gphoto.gp_result_as_string(result).decode() raise GPhotoError(result, message) return result def encoded_path(self): return ("ptpip:" + self.target).encode('utf-8') def encoded_guid(self): tmp = self.guid.split("-") guid = [] l = lambda s: [s[i:i + 2:] for i in range(0, len(s), 2)][::-1] for i in range(0, 3): guid += l(tmp[i]) guid += tmp[3] guid += tmp[4] 
tmp = "".join(guid).lower() guid = [] for i in range(0, len(tmp), 2): guid.append(tmp[i:i + 2]) guid = ":".join(guid) return guid.encode('utf-8') def connect(self): # allocate and initialise a new camera self.debug('allocate camera') res = self.gphoto.gp_camera_new(ctypes.pointer(self.handle)) self.gphoto_check(res) # set model and guid in settings file self.gphoto.gp_setting_set(b"gphoto2", b"model", b"PTP/IP Camera") self.gphoto.gp_setting_set(b"ptp2_ip", b"guid", self.encoded_guid()) # load abilities if not self.abilitylist: self.debug('load abilities list') self.abilitylist = ctypes.c_void_p() self.gphoto.gp_abilities_list_new(ctypes.pointer(self.abilitylist)) res = self.gphoto.gp_abilities_list_load(self.abilitylist, self.context) self.gphoto_check(res) # search for model abilities self.debug('search abilities list') index = self.gphoto.gp_abilities_list_lookup_model(self.abilitylist, b'PTP/IP Camera') self.gphoto_check(index) self.debug('found at %d' % index) # load abilities self.debug('load abilities') abilities = GPhoto2Binder.CameraAbilities() res = self.gphoto.gp_abilities_list_get_abilities(self.abilitylist, index, ctypes.pointer(abilities)) self.gphoto_check(res) # set camera abilities self.debug('set camera abilities') res = self.gphoto.gp_camera_set_abilities(self.handle, abilities) self.gphoto_check(res) # load port list if not self.portlist: self.debug('load port list') self.portlist = ctypes.c_void_p() self.gphoto.gp_port_info_list_new(ctypes.pointer(self.portlist)) res = self.gphoto.gp_port_info_list_load(self.portlist) self.gphoto_check(res) # find port info entry self.debug('search for port info') index = self.gphoto.gp_port_info_list_lookup_path(self.portlist, self.encoded_path()) self.gphoto_check(index) self.debug('found at %d' % index) # load port info entry self.debug('load port info') info = ctypes.c_void_p() res = self.gphoto.gp_port_info_list_get_info(self.portlist, index, ctypes.pointer(info)) self.gphoto_check(res) # set the camera with the appropriate port info self.debug('set camera port') res = self.gphoto.gp_camera_set_port_info(self.handle, info) self.gphoto_check(res) # load the port path for debugging # if DEBUG: # path = ctypes.c_char_p() # res = self.gphoto.gp_port_info_get_path(info, ctypes.pointer(path)) # self.gphoto_check(res) # self.debug(path.value) # connect to camera self.log('connecting...') res = self.gphoto.gp_camera_init(self.handle, self.context) self.gphoto_check(res) self.log('connected.') self.connected = True return True def disconnect(self): self._clear_cache() res = self.gphoto.gp_camera_exit(self.handle, self.context) self.gphoto_check(res) res = self.gphoto.gp_camera_unref(self.handle) self.gphoto_check(res) res = self.gphoto.gp_context_unref(self.context) self.gphoto_check(res) # FIXME: gphoto PTP/IP does not close sockets properly; try to work around? 
def _root_widget(self): now = time.time() if (not self.cached_root) or abs(now - self.cached_time) > self.cache_expiry: if not self.cached_root: self.gphoto.gp_widget_free(self.cached_root) self.cached_root = None root = ctypes.c_void_p() res = self.gphoto.gp_camera_get_config(self.handle, ctypes.pointer(root), self.context) if res >= 0: self.cached_root = root self.cached_time = now return self.cached_root def _clear_cache(self): if self.cached_root: self.gphoto.gp_widget_free(self.cached_root) self.cached_root = None def _find_widget(self, label): root = self._root_widget() if root: child = ctypes.c_void_p() res = self.gphoto.gp_widget_get_child_by_name(root, ctypes.c_char_p(label), ctypes.pointer(child)) if res >= 0: return (root, child) return None widget_types = {0: 'window', 1: 'section', 2: 'text', 3: 'range', 4: 'toggle', 5: 'radio', 6: 'menu', 7: 'button', 8: 'date'} def _widget_type(self, pair): (root, child) = pair w_type = ctypes.c_int() res = self.gphoto.gp_widget_get_type(child, ctypes.pointer(w_type)) self.gphoto_check(res) w_type = w_type.value if w_type in self.widget_types: return self.widget_types[w_type] else: return 'unknown' def _widget_value(self, pair): (root, child) = pair w_type = self._widget_type(pair) if w_type == 'text' or w_type == 'menu' or w_type == 'radio': ptr = ctypes.c_char_p() res = self.gphoto.gp_widget_get_value(child, ctypes.pointer(ptr)) self.gphoto_check(res) return (w_type, ptr.value) elif w_type == 'range': top = ctypes.c_float() bottom = ctypes.c_float() step = ctypes.c_float() value = ctypes.c_float() res = self.gphoto.gp_widget_get_range(child, ctypes.pointer(bottom), ctypes.pointer(top), ctypes.pointer(step)) self.gphoto_check(res) res = self.gphoto.gp_widget_get_value(child, ctypes.pointer(value)) self.gphoto_check(res) return (w_type, value.value, bottom.value, top.value, step.value) elif w_type == 'toggle' or w_type == 'date': value = ctypes.c_int() res = self.gphoto.gp_widget_get_value(child, ctypes.pointer(value)) self.gphoto_check(res) return (w_type, value.value) else: return None def _match_choice(self, pair, value): choices = self._widget_choices(pair) if isinstance(value, int): if (value >= 0) and (value < len(choices)): return choices[value] for (i, c) in zip(range(len(choices)), choices): try: if c == str(value): return c elif float(c) == float(value): return c elif int(c) == int(value): return c except: pass if isinstance(value, str): return value else: return str(value) def _widget_set(self, pair, value): (root, child) = pair w_type = self._widget_type(pair) if w_type == 'toggle': if value: value = 1 else: value = 0 elif w_type == 'range': value = float(value) elif (w_type == 'radio') or (w_type == 'menu'): value = self._match_choice(pair, value) if isinstance(value, int): v = ctypes.c_int(value) res = self.gphoto.gp_widget_set_value(child, ctypes.pointer(v)) return (res >= 0) elif isinstance(value, float): v = ctypes.c_float(float(value)) res = self.gphoto.gp_widget_set_value(child, ctypes.pointer(v)) return (res >= 0) elif isinstance(value, str): v = ctypes.c_char_p(value) res = self.gphoto.gp_widget_set_value(child, v) return (res >= 0) else: return False def _widget_choices(self, pair): (root, child) = pair w_type = self._widget_type(pair) if w_type == 'radio' or w_type == 'menu': count = self.gphoto.gp_widget_count_choices(child) if count > 0: choices = [] for i in range(count): ptr = ctypes.c_char_p() res = self.gphoto.gp_widget_get_choice(child, i, ctypes.pointer(ptr)) self.gphoto_check(res) choices.append(ptr.value) 
return choices return None def get_config(self, label): pair = self._find_widget(label) value = None if pair: value = self._widget_value(pair) return value def get_config_choices(self, label): pair = self._find_widget(label) value = None if pair: value = self._widget_choices(pair) return value def set_config(self, label, value): pair = self._find_widget(label) result = False if pair: result = self._widget_set(pair, value) if result: res = self.gphoto.gp_camera_set_config(self.handle, pair[0], self.context) result = (res >= 0) return result known_widgets = [ 'uilock', 'bulb', 'drivemode', 'focusmode', 'autofocusdrive', 'manualfocusdrive', 'eoszoom', 'eoszoomposition', 'eosviewfinder', 'eosremoterelease', 'serialnumber', 'manufacturer', 'cameramodel', 'deviceversion', 'model', 'batterylevel', 'lensname', 'eosserialnumber', 'shuttercounter', 'availableshots', 'reviewtime', 'output', 'evfmode', 'ownername', 'artist', 'copyright', 'autopoweroff', 'imageformat', 'imageformatsd', 'iso', 'whitebalance', 'colortemperature', 'whitebalanceadjusta', 'whitebalanceadjustb', 'whitebalancexa', 'whitebalancexb', 'colorspace' 'exposurecompensation', 'focusmode', 'autoexposuremode', 'picturestyle', 'shutterspeed', 'bracketmode', 'aeb', 'aperture', 'capturetarget'] def list_config(self): config = {} for k in self.known_widgets: config[k] = self.get_config(k) return config # XXX: this hangs waiting for response from camera def trigger_capture(self): res = self.gphoto.gp_camera_trigger_capture(self.handle, self.context) try: self.gphoto_check(res) return True except GPhotoError as e: self.log(str(e)) return False # XXX: this hangs waiting for response from camera # def capture(self, capture_type=GP_CAPTURE_IMAGE): # path = CameraFilePath() # res = self.gphoto.gp_camera_capture(self.handle, ctypes.c_int(capture_type), ctypes.pointer(path), self.context) # try: # self.gphoto_check(res) # return (path.folder, path.name) # except GPhotoError as e: # self.log(str(e)) # return None def wait_for_event(self, timeout=10): ev_type = ctypes.c_int() data = ctypes.c_void_p() res = self.gphoto.gp_camera_capture(self.handle, ctypes.c_int(timeout), ctypes.pointer(ev_type), ctypes.pointer(data), self.context) try: self.gphoto_check(res) return ev_type.value except GPhotoError as e: self.log(str(e)) return None class Canon6DConnection(Common): log_label = 'Canon6DConnection' def __init__(self, ip, guid, callback): self.ip = ip self.guid = guid self.callback = callback def run(self): self.log('started %s (%s)' % (self.ip, self.guid)) self.camera = PTPIPCamera(self.ip, self.guid) try: self.camera.connect() print('connected to %s (%s)' % (self.ip, self.guid)) self.callback(self.camera) except Exception as e: logger.error('failed for {0} ({1}) - {2}'.format(self.ip, self.guid, e)) finally: try: self.camera.disconnect() except: pass self.log('shutdown %s (%s)' % (self.ip, self.guid)) class Canon6DConnector(Common): def __init__(self, callback): self.callback = callback self.connections = [] zeroconf = Zeroconf() listener = self browser = ServiceBrowser(zeroconf, "_ptp._tcp.local.", listener) browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener) browser = ServiceBrowser(zeroconf, "_dlna._tcp.local.", listener) browser = ServiceBrowser(zeroconf, "_daap._tcp.local.", listener) browser = ServiceBrowser(zeroconf, "_dacp._tcp.local.", listener) browser = ServiceBrowser(zeroconf, "_touch-able._tcp.local.", listener) browser = ServiceBrowser(zeroconf, "_rsp._tcp.local.", listener) browser = ServiceBrowser(zeroconf, 
"_rsp._tcp.local.", listener) try: input("Press enter to exit...\n\n") finally: zeroconf.close() def connect(self, ip, guid): logger.error('Connecting to {0}, {1}'.format(ip, guid)) if len(self.connections) == 0: connection = Canon6DConnection(ip, guid, self.callback) connection.start() self.connections.append(connection) def remove_service(self, zeroconf, type, name): print("Service %s removed" % (name,)) def add_service(self, zeroconf, type, name): info = zeroconf.get_service_info(type, name) print("Service %s added, service info: %s" % (name, info)) if info is not None: try: guid = info.properties[b'sid.canon.com'].decode() ip = socket.inet_ntoa(info.address) self.connect(ip, guid) except: logger.error('not a canon') def test_callback(camera): print('camera_main', camera.guid) camera.set_config('capture', 1) config = camera.list_config() print('got config') for k in sorted(config.keys()): v = config[k] if v and (v[0] == 'radio'): print(k, v, camera.get_config_choices(k)) else: print(k, v) result = camera.set_config('aperture', '8.0') print('set aperture', result) result = camera.set_config('capturetarget', 'Memory card') print('set memory card', result) result = camera.set_config('eosremoterelease', 'Immediate') print('trigger capture', result) time.sleep(1) def main(args): Canon6DConnector(test_callback) if __name__ == "__main__": main(sys.argv[1:])
# SRE test harness for the Python regression suite # this is based on test_re.py, but uses a test function instead # of all those asserts import sys sys.path=['.']+sys.path from test_support import verbose, TestFailed, have_unicode import sre import sys, os, string, traceback # # test support def test(expression, result, exception=None): try: r = eval(expression) except: if exception: if not isinstance(sys.exc_value, exception): print expression, "FAILED" # display name, not actual value if exception is sre.error: print "expected", "sre.error" else: print "expected", exception.__name__ print "got", sys.exc_type.__name__, str(sys.exc_value) else: print expression, "FAILED" traceback.print_exc(file=sys.stdout) else: if exception: print expression, "FAILED" if exception is sre.error: print "expected", "sre.error" else: print "expected", exception.__name__ print "got result", repr(r) else: if r != result: print expression, "FAILED" print "expected", repr(result) print "got result", repr(r) if verbose: print 'Running tests on character literals' for i in [0, 8, 16, 32, 64, 127, 128, 255]: test(r"""sre.match(r"\%03o" % i, chr(i)) is not None""", 1) test(r"""sre.match(r"\%03o0" % i, chr(i)+"0") is not None""", 1) test(r"""sre.match(r"\%03o8" % i, chr(i)+"8") is not None""", 1) test(r"""sre.match(r"\x%02x" % i, chr(i)) is not None""", 1) test(r"""sre.match(r"\x%02x0" % i, chr(i)+"0") is not None""", 1) test(r"""sre.match(r"\x%02xz" % i, chr(i)+"z") is not None""", 1) test(r"""sre.match("\911", "")""", None, sre.error) # # Misc tests from Tim Peters' re.doc if verbose: print 'Running tests on sre.search and sre.match' test(r"""sre.search(r'x*', 'axx').span(0)""", (0, 0)) test(r"""sre.search(r'x*', 'axx').span()""", (0, 0)) test(r"""sre.search(r'x+', 'axx').span(0)""", (1, 3)) test(r"""sre.search(r'x+', 'axx').span()""", (1, 3)) test(r"""sre.search(r'x', 'aaa')""", None) test(r"""sre.match(r'a*', 'xxx').span(0)""", (0, 0)) test(r"""sre.match(r'a*', 'xxx').span()""", (0, 0)) test(r"""sre.match(r'x*', 'xxxa').span(0)""", (0, 3)) test(r"""sre.match(r'x*', 'xxxa').span()""", (0, 3)) test(r"""sre.match(r'a+', 'xxx')""", None) # bug 113254 test(r"""sre.match(r'(a)|(b)', 'b').start(1)""", -1) test(r"""sre.match(r'(a)|(b)', 'b').end(1)""", -1) test(r"""sre.match(r'(a)|(b)', 'b').span(1)""", (-1, -1)) if verbose: print 'Running tests on sre.sub' test(r"""sre.sub(r"(?i)b+", "x", "bbbb BBBB")""", 'x x') def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1) test(r"""sre.sub(r'\d+', bump_num, '08.2 -2 23x99y')""", '9.3 -3 24x100y') test(r"""sre.sub(r'\d+', bump_num, '08.2 -2 23x99y', 3)""", '9.3 -3 23x99y') test(r"""sre.sub(r'.', lambda m: r"\n", 'x')""", '\\n') test(r"""sre.sub(r'.', r"\n", 'x')""", '\n') s = r"\1\1" test(r"""sre.sub(r'(.)', s, 'x')""", 'xx') test(r"""sre.sub(r'(.)', sre.escape(s), 'x')""", s) test(r"""sre.sub(r'(.)', lambda m: s, 'x')""", s) test(r"""sre.sub(r'(?P<a>x)', '\g<a>\g<a>', 'xx')""", 'xxxx') test(r"""sre.sub(r'(?P<a>x)', '\g<a>\g<1>', 'xx')""", 'xxxx') test(r"""sre.sub(r'(?P<unk>x)', '\g<unk>\g<unk>', 'xx')""", 'xxxx') test(r"""sre.sub(r'(?P<unk>x)', '\g<1>\g<1>', 'xx')""", 'xxxx') # bug 449964: fails for group followed by other escape test(r"""sre.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx')""", 'xx\bxx\b') test(r"""sre.sub(r'a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a')""", '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D') test(r"""sre.sub(r'a', '\t\n\v\r\f\a', 'a')""", '\t\n\v\r\f\a') test(r"""sre.sub(r'a', '\t\n\v\r\f\a', 'a')""", 
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))) test(r"""sre.sub(r'^\s*', 'X', 'test')""", 'Xtest') # qualified sub test(r"""sre.sub(r'a', 'b', 'aaaaa')""", 'bbbbb') test(r"""sre.sub(r'a', 'b', 'aaaaa', 1)""", 'baaaa') # bug 114660 test(r"""sre.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there')""", 'hello there') # Test for sub() on escaped characters, see SF bug #449000 test(r"""sre.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n') test(r"""sre.sub('\r\n', r'\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n') test(r"""sre.sub(r'\r\n', '\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n') test(r"""sre.sub('\r\n', '\n', 'abc\r\ndef\r\n')""", 'abc\ndef\n') # Test for empty sub() behaviour, see SF bug #462270 test(r"""sre.sub('x*', '-', 'abxd')""", '-a-b-d-') test(r"""sre.sub('x+', '-', 'abxd')""", 'ab-d') if verbose: print 'Running tests on symbolic references' test(r"""sre.sub(r'(?P<a>x)', '\g<a', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<a a>', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<1a1>', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<ab>', 'xx')""", None, IndexError) test(r"""sre.sub(r'(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)|(?P<b>y)', '\\2', 'xx')""", None, sre.error) if verbose: print 'Running tests on sre.subn' test(r"""sre.subn(r"(?i)b+", "x", "bbbb BBBB")""", ('x x', 2)) test(r"""sre.subn(r"b+", "x", "bbbb BBBB")""", ('x BBBB', 1)) test(r"""sre.subn(r"b+", "x", "xyz")""", ('xyz', 0)) test(r"""sre.subn(r"b*", "x", "xyz")""", ('xxxyxzx', 4)) test(r"""sre.subn(r"b*", "x", "xyz", 2)""", ('xxxyz', 2)) if verbose: print 'Running tests on sre.split' test(r"""sre.split(r":", ":a:b::c")""", ['', 'a', 'b', '', 'c']) test(r"""sre.split(r":+", ":a:b:::")""", ['', 'a', 'b', '']) test(r"""sre.split(r":*", ":a:b::c")""", ['', 'a', 'b', 'c']) test(r"""sre.split(r"(:*)", ":a:b::c")""", ['', ':', 'a', ':', 'b', '::', 'c']) test(r"""sre.split(r"(?::*)", ":a:b::c")""", ['', 'a', 'b', 'c']) test(r"""sre.split(r"(:)*", ":a:b::c")""", ['', ':', 'a', ':', 'b', ':', 'c']) test(r"""sre.split(r"([b:]+)", ":a:b::c")""", ['', ':', 'a', ':b::', 'c']) test(r"""sre.split(r"(b)|(:+)", ":a:b::c")""", ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c']) test(r"""sre.split(r"(?:b)|(?::+)", ":a:b::c")""", ['', 'a', '', '', 'c']) test(r"""sre.split(r":", ":a:b::c", 2)""", ['', 'a', 'b::c']) test(r"""sre.split(r':', 'a:b:c:d', 2)""", ['a', 'b', 'c:d']) test(r"""sre.split(r"(:)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c']) test(r"""sre.split(r"(:*)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c']) if verbose: print "Running tests on sre.findall" test(r"""sre.findall(r":+", "abc")""", []) test(r"""sre.findall(r":+", "a:b::c:::d")""", [":", "::", ":::"]) test(r"""sre.findall(r"(:+)", "a:b::c:::d")""", [":", "::", ":::"]) test(r"""sre.findall(r"(:)(:*)", "a:b::c:::d")""", [(":", ""), (":", ":"), (":", "::")]) test(r"""sre.findall(r"(a)|(b)", "abc")""", [("a", ""), ("", "b")]) # bug 117612 test(r"""sre.findall(r"(a|(b))", "aba")""", [("a", ""),("b", "b"),("a", "")]) if sys.hexversion >= 0x02020000: if verbose: print "Running tests on sre.finditer" def fixup(seq): # convert iterator to list if not hasattr(seq, "next") or not hasattr(seq, "__iter__"): print "finditer returned", type(seq) return map(lambda item: item.group(0), seq) # sanity test(r"""fixup(sre.finditer(r":+", "a:b::c:::d"))""", [":", "::", 
":::"]) if verbose: print "Running tests on sre.match" test(r"""sre.match(r'a', 'a').groups()""", ()) test(r"""sre.match(r'(a)', 'a').groups()""", ('a',)) test(r"""sre.match(r'(a)', 'a').group(0)""", 'a') test(r"""sre.match(r'(a)', 'a').group(1)""", 'a') test(r"""sre.match(r'(a)', 'a').group(1, 1)""", ('a', 'a')) pat = sre.compile(r'((a)|(b))(c)?') test(r"""pat.match('a').groups()""", ('a', 'a', None, None)) test(r"""pat.match('b').groups()""", ('b', None, 'b', None)) test(r"""pat.match('ac').groups()""", ('a', 'a', None, 'c')) test(r"""pat.match('bc').groups()""", ('b', None, 'b', 'c')) test(r"""pat.match('bc').groups("")""", ('b', "", 'b', 'c')) pat = sre.compile(r'(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?') test(r"""pat.match('a').group(1, 2, 3)""", ('a', None, None)) test(r"""pat.match('b').group('a1', 'b2', 'c3')""", (None, 'b', None)) test(r"""pat.match('ac').group(1, 'b2', 3)""", ('a', None, 'c')) # bug 448951 (similar to 429357, but with single char match) # (Also test greedy matches.) for op in '','?','*': test(r"""sre.match(r'((.%s):)?z', 'z').groups()"""%op, (None, None)) test(r"""sre.match(r'((.%s):)?z', 'a:z').groups()"""%op, ('a:', 'a')) if verbose: print "Running tests on sre.escape" p = "" for i in range(0, 256): p = p + chr(i) test(r"""sre.match(sre.escape(chr(i)), chr(i)) is not None""", 1) test(r"""sre.match(sre.escape(chr(i)), chr(i)).span()""", (0,1)) pat = sre.compile(sre.escape(p)) test(r"""pat.match(p) is not None""", 1) test(r"""pat.match(p).span()""", (0,256)) if verbose: print 'Running tests on sre.Scanner' def s_ident(scanner, token): return token def s_operator(scanner, token): return "op%s" % token def s_float(scanner, token): return float(token) def s_int(scanner, token): return int(token) scanner = sre.Scanner([ (r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*", s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+", None), ]) # sanity check test('scanner.scan("sum = 3*foo + 312.50 + bar")', (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], '')) if verbose: print 'Pickling a SRE_Pattern instance' try: import pickle pat = sre.compile(r'a(?:b|(c|e){1,2}?|d)+?(.)') s = pickle.dumps(pat) pat = pickle.loads(s) except: print TestFailed, 're module pickle' try: import cPickle pat = sre.compile(r'a(?:b|(c|e){1,2}?|d)+?(.)') s = cPickle.dumps(pat) pat = cPickle.loads(s) except: print TestFailed, 're module cPickle' # constants test(r"""sre.I""", sre.IGNORECASE) test(r"""sre.L""", sre.LOCALE) test(r"""sre.M""", sre.MULTILINE) test(r"""sre.S""", sre.DOTALL) test(r"""sre.X""", sre.VERBOSE) test(r"""sre.T""", sre.TEMPLATE) test(r"""sre.U""", sre.UNICODE) for flags in [sre.I, sre.M, sre.X, sre.S, sre.L, sre.T, sre.U]: try: r = sre.compile('^pattern$', flags) except: print 'Exception raised on flag', flags if verbose: print 'Test engine limitations' # Try nasty case that overflows the straightforward recursive # implementation of repeated groups. 
test("sre.match('(x)*', 50000*'x').span()", (0, 50000), RuntimeError) test("sre.match(r'(x)*y', 50000*'x'+'y').span()", (0, 50001), RuntimeError) test("sre.match(r'(x)*?y', 50000*'x'+'y').span()", (0, 50001), RuntimeError) from re_tests import * if verbose: print 'Running re_tests test suite' else: # To save time, only run the first and last 10 tests #tests = tests[:10] + tests[-10:] pass for t in tests: sys.stdout.flush() pattern=s=outcome=repl=expected=None if len(t)==5: pattern, s, outcome, repl, expected = t elif len(t)==3: pattern, s, outcome = t else: raise ValueError, ('Test tuples should have 3 or 5 fields',t) try: obj=sre.compile(pattern) except sre.error: if outcome==SYNTAX_ERROR: pass # Expected a syntax error else: print '=== Syntax error:', t except KeyboardInterrupt: raise KeyboardInterrupt except: print '*** Unexpected error ***', t if verbose: traceback.print_exc(file=sys.stdout) else: try: result=obj.search(s) except (sre.error), msg: print '=== Unexpected exception', t, repr(msg) if outcome==SYNTAX_ERROR: print '=== Compiled incorrectly', t elif outcome==FAIL: if result is None: pass # No match, as expected else: print '=== Succeeded incorrectly', t elif outcome==SUCCEED: if result is not None: # Matched, as expected, so now we compute the # result string and compare it to our expected result. start, end = result.span(0) vardict={'found': result.group(0), 'groups': result.group(), 'flags': result.re.flags} for i in range(1, 100): try: gi = result.group(i) # Special hack because else the string concat fails: if gi is None: gi = "None" except IndexError: gi = "Error" vardict['g%d' % i] = gi for i in result.re.groupindex.keys(): try: gi = result.group(i) if gi is None: gi = "None" except IndexError: gi = "Error" vardict[i] = gi repl=eval(repl, vardict) if repl!=expected: print '=== grouping error', t, print repr(repl)+' should be '+repr(expected) else: print '=== Failed incorrectly', t continue # Try the match on a unicode string, and check that it # still succeeds. try: u = unicode(s, "latin-1") except NameError: pass except TypeError: continue # skip unicode test strings else: result=obj.search(u) if result==None: print '=== Fails on unicode match', t # Try the match on a unicode pattern, and check that it # still succeeds. try: u = unicode(pattern, "latin-1") except NameError: pass else: obj=sre.compile(u) result=obj.search(s) if result==None: print '=== Fails on unicode pattern match', t # Try the match with the search area limited to the extent # of the match and see if it still succeeds. \B will # break (because it won't match at the end or start of a # string), so we'll ignore patterns that feature it. if pattern[:2]!='\\B' and pattern[-2:]!='\\B': obj=sre.compile(pattern) result=obj.search(s, result.start(0), result.end(0)+1) if result==None: print '=== Failed on range-limited match', t # Try the match with IGNORECASE enabled, and check that it # still succeeds. obj=sre.compile(pattern, sre.IGNORECASE) result=obj.search(s) if result==None: print '=== Fails on case-insensitive match', t # Try the match with LOCALE enabled, and check that it # still succeeds. obj=sre.compile(pattern, sre.LOCALE) result=obj.search(s) if result==None: print '=== Fails on locale-sensitive match', t # Try the match with UNICODE locale enabled, and check # that it still succeeds. if have_unicode: obj=sre.compile(pattern, sre.UNICODE) result=obj.search(s) if result==None: print '=== Fails on unicode-sensitive match', t
"""Access to Python's configuration information.""" import os import sys from os.path import pardir, realpath __all__ = [ 'get_config_h_filename', 'get_config_var', 'get_config_vars', 'get_makefile_filename', 'get_path', 'get_path_names', 'get_paths', 'get_platform', 'get_python_version', 'get_scheme_names', 'parse_config_h', ] # Keys for get_config_var() that are never converted to Python integers. _ALWAYS_STR = { 'MACOSX_DEPLOYMENT_TARGET', } _INSTALL_SCHEMES = { 'posix_prefix': { 'stdlib': '{installed_base}/{platlibdir}/python{py_version_short}', 'platstdlib': '{platbase}/{platlibdir}/python{py_version_short}', 'purelib': '{base}/lib/python{py_version_short}/site-packages', 'platlib': '{platbase}/{platlibdir}/python{py_version_short}/site-packages', 'include': '{installed_base}/include/python{py_version_short}{abiflags}', 'platinclude': '{installed_platbase}/include/python{py_version_short}{abiflags}', 'scripts': '{base}/bin', 'data': '{base}', }, 'posix_home': { 'stdlib': '{installed_base}/lib/python', 'platstdlib': '{base}/lib/python', 'purelib': '{base}/lib/python', 'platlib': '{base}/lib/python', 'include': '{installed_base}/include/python', 'platinclude': '{installed_base}/include/python', 'scripts': '{base}/bin', 'data': '{base}', }, 'nt': { 'stdlib': '{installed_base}/Lib', 'platstdlib': '{base}/Lib', 'purelib': '{base}/Lib/site-packages', 'platlib': '{base}/Lib/site-packages', 'include': '{installed_base}/Include', 'platinclude': '{installed_base}/Include', 'scripts': '{base}/Scripts', 'data': '{base}', }, } # NOTE: site.py has copy of this function. # Sync it when modify this function. def _getuserbase(): env_base = os.environ.get("PYTHONUSERBASE", None) if env_base: return env_base # VxWorks has no home directories if sys.platform == "vxworks": return None def joinuser(*args): return os.path.expanduser(os.path.join(*args)) if os.name == "nt": base = os.environ.get("APPDATA") or "~" return joinuser(base, "Python") if sys.platform == "darwin" and sys._framework: return joinuser("~", "Library", sys._framework, f"{sys.version_info[0]}.{sys.version_info[1]}") return joinuser("~", ".local") _HAS_USER_BASE = (_getuserbase() is not None) if _HAS_USER_BASE: _INSTALL_SCHEMES |= { # NOTE: When modifying "purelib" scheme, update site._get_path() too. 
'nt_user': { 'stdlib': '{userbase}/Python{py_version_nodot_plat}', 'platstdlib': '{userbase}/Python{py_version_nodot_plat}', 'purelib': '{userbase}/Python{py_version_nodot_plat}/site-packages', 'platlib': '{userbase}/Python{py_version_nodot_plat}/site-packages', 'include': '{userbase}/Python{py_version_nodot_plat}/Include', 'scripts': '{userbase}/Python{py_version_nodot_plat}/Scripts', 'data': '{userbase}', }, 'posix_user': { 'stdlib': '{userbase}/{platlibdir}/python{py_version_short}', 'platstdlib': '{userbase}/{platlibdir}/python{py_version_short}', 'purelib': '{userbase}/lib/python{py_version_short}/site-packages', 'platlib': '{userbase}/{platlibdir}/python{py_version_short}/site-packages', 'include': '{userbase}/include/python{py_version_short}', 'scripts': '{userbase}/bin', 'data': '{userbase}', }, 'osx_framework_user': { 'stdlib': '{userbase}/lib/python', 'platstdlib': '{userbase}/lib/python', 'purelib': '{userbase}/lib/python/site-packages', 'platlib': '{userbase}/lib/python/site-packages', 'include': '{userbase}/include/python{py_version_short}', 'scripts': '{userbase}/bin', 'data': '{userbase}', }, } _SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include', 'scripts', 'data') _PY_VERSION = sys.version.split()[0] _PY_VERSION_SHORT = f'{sys.version_info[0]}.{sys.version_info[1]}' _PY_VERSION_SHORT_NO_DOT = f'{sys.version_info[0]}{sys.version_info[1]}' _PREFIX = os.path.normpath(sys.prefix) _BASE_PREFIX = os.path.normpath(sys.base_prefix) _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) _BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) _CONFIG_VARS = None _USER_BASE = None # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). _variable_rx = r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)" _findvar1_rx = r"\$\(([A-Za-z][A-Za-z0-9_]*)\)" _findvar2_rx = r"\${([A-Za-z][A-Za-z0-9_]*)}" def _safe_realpath(path): try: return realpath(path) except OSError: return path if sys.executable: _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) else: # sys.executable can be empty if argv[0] has been changed and Python is # unable to retrieve the real program name _PROJECT_BASE = _safe_realpath(os.getcwd()) if (os.name == 'nt' and _PROJECT_BASE.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) # set for cross builds if "_PYTHON_PROJECT_BASE" in os.environ: _PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"]) def _is_python_source_dir(d): for fn in ("Setup", "Setup.local"): if os.path.isfile(os.path.join(d, "Modules", fn)): return True return False _sys_home = getattr(sys, '_home', None) if os.name == 'nt': def _fix_pcbuild(d): if d and os.path.normcase(d).startswith( os.path.normcase(os.path.join(_PREFIX, "PCbuild"))): return _PREFIX return d _PROJECT_BASE = _fix_pcbuild(_PROJECT_BASE) _sys_home = _fix_pcbuild(_sys_home) def is_python_build(check_home=False): if check_home and _sys_home: return _is_python_source_dir(_sys_home) return _is_python_source_dir(_PROJECT_BASE) _PYTHON_BUILD = is_python_build(True) if _PYTHON_BUILD: for scheme in ('posix_prefix', 'posix_home'): # On POSIX-y platofrms, Python will: # - Build from .h files in 'headers' (which is only added to the # scheme when building CPython) # - Install .h files to 'include' scheme = _INSTALL_SCHEMES[scheme] scheme['headers'] = scheme['include'] scheme['include'] = '{srcdir}/Include' scheme['platinclude'] = '{projectbase}/.' 
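# Note on the scheme templates above: each path entry is a str.format template that
# _subst_vars() (defined below) expands with the configuration-variable dictionary,
# for example
#
#     >>> '{userbase}/lib/python{py_version_short}/site-packages'.format(
#     ...     userbase='/home/alice/.local', py_version_short='3.10')
#     '/home/alice/.local/lib/python3.10/site-packages'
#
# (the values shown are illustrative; the real ones come from get_config_vars()).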
def _subst_vars(s, local_vars): try: return s.format(**local_vars) except KeyError as var: try: return s.format(**os.environ) except KeyError: raise AttributeError(f'{var}') from None def _extend_dict(target_dict, other_dict): target_keys = target_dict.keys() for key, value in other_dict.items(): if key in target_keys: continue target_dict[key] = value def _expand_vars(scheme, vars): res = {} if vars is None: vars = {} _extend_dict(vars, get_config_vars()) for key, value in _INSTALL_SCHEMES[scheme].items(): if os.name in ('posix', 'nt'): value = os.path.expanduser(value) res[key] = os.path.normpath(_subst_vars(value, vars)) return res def _get_preferred_schemes(): if os.name == 'nt': return { 'prefix': 'nt', 'home': 'posix_home', 'user': 'nt_user', } if sys.platform == 'darwin' and sys._framework: return { 'prefix': 'posix_prefix', 'home': 'posix_home', 'user': 'osx_framework_user', } return { 'prefix': 'posix_prefix', 'home': 'posix_home', 'user': 'posix_user', } def get_preferred_scheme(key): scheme = _get_preferred_schemes()[key] if scheme not in _INSTALL_SCHEMES: raise ValueError( f"{key!r} returned {scheme!r}, which is not a valid scheme " f"on this platform" ) return scheme def get_default_scheme(): return get_preferred_scheme('prefix') def _parse_makefile(filename, vars=None, keep_unresolved=True): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ import re if vars is None: vars = {} done = {} notdone = {} with open(filename, encoding=sys.getfilesystemencoding(), errors="surrogateescape") as f: lines = f.readlines() for line in lines: if line.startswith('#') or line.strip() == '': continue m = re.match(_variable_rx, line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: if n in _ALWAYS_STR: raise ValueError v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # do variable interpolation here variables = list(notdone.keys()) # Variables with a 'PY_' prefix in the makefile. These need to # be made available without that prefix through sysconfig. # Special care is needed to ensure that variable expansion works, even # if the expansion uses the name without a prefix. 
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') while len(variables) > 0: for name in tuple(variables): value = notdone[name] m1 = re.search(_findvar1_rx, value) m2 = re.search(_findvar2_rx, value) if m1 and m2: m = m1 if m1.start() < m2.start() else m2 else: m = m1 if m1 else m2 if m is not None: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] elif n in renamed_variables: if (name.startswith('PY_') and name[3:] in renamed_variables): item = "" elif 'PY_' + n in notdone: found = False else: item = str(done['PY_' + n]) else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: if name in _ALWAYS_STR: raise ValueError value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value variables.remove(name) if name.startswith('PY_') \ and name[3:] in renamed_variables: name = name[3:] if name not in done: done[name] = value else: # Adds unresolved variables to the done dict. # This is disabled when called from distutils.sysconfig if keep_unresolved: done[name] = value # bogus variable reference (e.g. "prefix=$/opt/python"); # just drop it since we can't deal variables.remove(name) # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary vars.update(done) return vars def get_makefile_filename(): """Return the path of the Makefile.""" if _PYTHON_BUILD: return os.path.join(_sys_home or _PROJECT_BASE, "Makefile") if hasattr(sys, 'abiflags'): config_dir_name = f'config-{_PY_VERSION_SHORT}{sys.abiflags}' else: config_dir_name = 'config' if hasattr(sys.implementation, '_multiarch'): config_dir_name += f'-{sys.implementation._multiarch}' return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') def _get_sysconfigdata_name(): multiarch = getattr(sys.implementation, '_multiarch', '') return os.environ.get( '_PYTHON_SYSCONFIGDATA_NAME', f'_sysconfigdata_{sys.abiflags}_{sys.platform}_{multiarch}', ) def _generate_posix_vars(): """Generate the Python module containing build-time variables.""" import pprint vars = {} # load the installed Makefile: makefile = get_makefile_filename() try: _parse_makefile(makefile, vars) except OSError as e: msg = f"invalid Python installation: unable to open {makefile}" if hasattr(e, "strerror"): msg = f"{msg} ({e.strerror})" raise OSError(msg) # load the installed pyconfig.h: config_h = get_config_h_filename() try: with open(config_h, encoding="utf-8") as f: parse_config_h(f, vars) except OSError as e: msg = f"invalid Python installation: unable to open {config_h}" if hasattr(e, "strerror"): msg = f"{msg} ({e.strerror})" raise OSError(msg) # On AIX, there are wrong paths to the linker scripts in the Makefile # -- these paths are relative to the Python source, but when installed # the scripts are in another directory. if _PYTHON_BUILD: vars['BLDSHARED'] = vars['LDSHARED'] # There's a chicken-and-egg situation on OS X with regards to the # _sysconfigdata module after the changes introduced by #15298: # get_config_vars() is called by get_platform() as part of the # `make pybuilddir.txt` target -- which is a precursor to the # _sysconfigdata.py module being constructed. Unfortunately, # get_config_vars() eventually calls _init_posix(), which attempts # to import _sysconfigdata, which we won't have built yet. 
In order # for _init_posix() to work, if we're on Darwin, just mock up the # _sysconfigdata module manually and populate it with the build vars. # This is more than sufficient for ensuring the subsequent call to # get_platform() succeeds. name = _get_sysconfigdata_name() if 'darwin' in sys.platform: import types module = types.ModuleType(name) module.build_time_vars = vars sys.modules[name] = module pybuilddir = f'build/lib.{get_platform()}-{_PY_VERSION_SHORT}' if hasattr(sys, "gettotalrefcount"): pybuilddir += '-pydebug' os.makedirs(pybuilddir, exist_ok=True) destfile = os.path.join(pybuilddir, name + '.py') with open(destfile, 'w', encoding='utf8') as f: f.write('# system configuration generated and used by' ' the sysconfig module\n') f.write('build_time_vars = ') pprint.pprint(vars, stream=f) # Create file used for sys.path fixup -- see Modules/getpath.c with open('pybuilddir.txt', 'w', encoding='utf8') as f: f.write(pybuilddir) def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" # _sysconfigdata is generated at build time, see _generate_posix_vars() name = _get_sysconfigdata_name() _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) build_time_vars = _temp.build_time_vars vars.update(build_time_vars) def _init_non_posix(vars): """Initialize the module as appropriate for NT""" # set basic install directories import _imp vars['LIBDEST'] = get_path('stdlib') vars['BINLIBDEST'] = get_path('platstdlib') vars['INCLUDEPY'] = get_path('include') vars['EXT_SUFFIX'] = _imp.extension_suffixes()[0] vars['EXE'] = '.exe' vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) vars['TZPATH'] = '' # # public APIs # def parse_config_h(fp, vars=None): """Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ if vars is None: vars = {} import re define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") while True: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: if n in _ALWAYS_STR: raise ValueError v = int(v) except ValueError: pass vars[n] = v else: m = undef_rx.match(line) if m: vars[m.group(1)] = 0 return vars def get_config_h_filename(): """Return the path of pyconfig.h.""" if _PYTHON_BUILD: if os.name == "nt": inc_dir = os.path.join(_sys_home or _PROJECT_BASE, "PC") else: inc_dir = _sys_home or _PROJECT_BASE else: inc_dir = get_path('platinclude') return os.path.join(inc_dir, 'pyconfig.h') def get_scheme_names(): """Return a tuple containing the schemes names.""" return tuple(sorted(_INSTALL_SCHEMES)) def get_path_names(): """Return a tuple containing the paths names.""" return _SCHEME_KEYS def get_paths(scheme=get_default_scheme(), vars=None, expand=True): """Return a mapping containing an install scheme. ``scheme`` is the install scheme name. If not provided, it will return the default scheme for the current platform. """ if expand: return _expand_vars(scheme, vars) else: return _INSTALL_SCHEMES[scheme] def get_path(name, scheme=get_default_scheme(), vars=None, expand=True): """Return a path corresponding to the scheme. ``scheme`` is the install scheme name. """ return get_paths(scheme, vars, expand)[name] def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. 
On Unix, this means every variable defined in Python's installed Makefile; On Windows it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _CONFIG_VARS if _CONFIG_VARS is None: _CONFIG_VARS = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # Distutils. _CONFIG_VARS['prefix'] = _PREFIX _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX _CONFIG_VARS['py_version'] = _PY_VERSION _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT _CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT _CONFIG_VARS['installed_base'] = _BASE_PREFIX _CONFIG_VARS['base'] = _PREFIX _CONFIG_VARS['installed_platbase'] = _BASE_EXEC_PREFIX _CONFIG_VARS['platbase'] = _EXEC_PREFIX _CONFIG_VARS['projectbase'] = _PROJECT_BASE _CONFIG_VARS['platlibdir'] = sys.platlibdir try: _CONFIG_VARS['abiflags'] = sys.abiflags except AttributeError: # sys.abiflags may not be defined on all platforms. _CONFIG_VARS['abiflags'] = '' try: _CONFIG_VARS['py_version_nodot_plat'] = sys.winver.replace('.', '') except AttributeError: _CONFIG_VARS['py_version_nodot_plat'] = '' if os.name == 'nt': _init_non_posix(_CONFIG_VARS) if os.name == 'posix': _init_posix(_CONFIG_VARS) # For backward compatibility, see issue19555 SO = _CONFIG_VARS.get('EXT_SUFFIX') if SO is not None: _CONFIG_VARS['SO'] = SO if _HAS_USER_BASE: # Setting 'userbase' is done below the call to the # init function to enable using 'get_config_var' in # the init-function. _CONFIG_VARS['userbase'] = _getuserbase() # Always convert srcdir to an absolute path srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE) if os.name == 'posix': if _PYTHON_BUILD: # If srcdir is a relative path (typically '.' or '..') # then it should be interpreted relative to the directory # containing Makefile. base = os.path.dirname(get_makefile_filename()) srcdir = os.path.join(base, srcdir) else: # srcdir is not meaningful since the installation is # spread about the filesystem. We choose the # directory containing the Makefile since we know it # exists. srcdir = os.path.dirname(get_makefile_filename()) _CONFIG_VARS['srcdir'] = _safe_realpath(srcdir) # OS X platforms require special customization to handle # multi-architecture, multi-os-version installers if sys.platform == 'darwin': import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) if args: vals = [] for name in args: vals.append(_CONFIG_VARS.get(name)) return vals else: return _CONFIG_VARS def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ if name == 'SO': import warnings warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2) return get_config_vars().get(name) def get_platform(): """Return a string that identifies the current platform. This is used mainly to distinguish platform-specific build directories and platform-specific built distributions. Typically includes the OS name and version and the architecture (as supplied by 'os.uname()'), although the exact information included depends on the OS; on Linux, the kernel version isn't particularly important. Examples of returned values: linux-i586 linux-alpha (?) 
solaris-2.6-sun4u Windows will return one of: win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) win32 (all others - specifically, sys.platform is returned) For other non-POSIX platforms, currently just returns 'sys.platform'. """ if os.name == 'nt': if 'amd64' in sys.version.lower(): return 'win-amd64' if '(arm)' in sys.version.lower(): return 'win-arm32' if '(arm64)' in sys.version.lower(): return 'win-arm64' return sys.platform if os.name != "posix" or not hasattr(os, 'uname'): # XXX what about the architecture? NT is Intel or Alpha return sys.platform # Set for cross builds explicitly if "_PYTHON_HOST_PLATFORM" in os.environ: return os.environ["_PYTHON_HOST_PLATFORM"] # Try to distinguish various flavours of Unix osname, host, release, version, machine = os.uname() # Convert the OS name to lowercase, remove '/' characters, and translate # spaces (for "Power Macintosh") osname = osname.lower().replace('/', '') machine = machine.replace(' ', '_') machine = machine.replace('/', '-') if osname[:5] == "linux": # At least on Linux/Intel, 'machine' is the processor -- # i386, etc. # XXX what about Alpha, SPARC, etc? return f"{osname}-{machine}" elif osname[:5] == "sunos": if release[0] >= "5": # SunOS 5 == Solaris 2 osname = "solaris" release = f"{int(release[0]) - 3}.{release[2:]}" # We can't use "platform.architecture()[0]" because a # bootstrap problem. We use a dict to get an error # if some suspicious happens. bitness = {2147483647:"32bit", 9223372036854775807:"64bit"} machine += f".{bitness[sys.maxsize]}" # fall through to standard osname-release-machine representation elif osname[:3] == "aix": from _aix_support import aix_platform return aix_platform() elif osname[:6] == "cygwin": osname = "cygwin" import re rel_re = re.compile(r'[\d.]+') m = rel_re.match(release) if m: release = m.group() elif osname[:6] == "darwin": import _osx_support osname, release, machine = _osx_support.get_platform_osx( get_config_vars(), osname, release, machine) return f"{osname}-{release}-{machine}" def get_python_version(): return _PY_VERSION_SHORT def expand_makefile_vars(s, vars): """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in 'string' according to 'vars' (a dictionary mapping variable names to values). Variables not present in 'vars' are silently expanded to the empty string. The variable values in 'vars' should not contain further variable expansions; if 'vars' is the output of 'parse_makefile()', you're fine. Returns a variable-expanded version of 's'. """ import re # This algorithm does multiple expansion, so if vars['foo'] contains # "${bar}", it will expand ${foo} to ${bar}, and then expand # ${bar}... and so forth. This is fine as long as 'vars' comes from # 'parse_makefile()', which takes care of such expansions eagerly, # according to make's variable expansion semantics. 
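    # Illustrative sketch (values assumed, not from any real Makefile): with
    # vars = {'foo': '${bar}', 'bar': 'baz'}, the loop below first rewrites
    # "$(foo)" to "${bar}" and, on the following pass, to "baz".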
    while True:
        m = re.search(_findvar1_rx, s) or re.search(_findvar2_rx, s)
        if m:
            (beg, end) = m.span()
            s = s[0:beg] + vars.get(m.group(1)) + s[end:]
        else:
            break
    return s


def _print_dict(title, data):
    for index, (key, value) in enumerate(sorted(data.items())):
        if index == 0:
            print(f'{title}: ')
        print(f'\t{key} = "{value}"')


def _main():
    """Display all information sysconfig contains."""
    if '--generate-posix-vars' in sys.argv:
        _generate_posix_vars()
        return
    print(f'Platform: "{get_platform()}"')
    print(f'Python version: "{get_python_version()}"')
    print(f'Current installation scheme: "{get_default_scheme()}"')
    print()
    _print_dict('Paths', get_paths())
    print()
    _print_dict('Variables', get_config_vars())


if __name__ == '__main__':
    _main()
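# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not part of the public API above):
# how the helpers defined in this module are typically combined.  The returned
# values are platform-dependent; the function is defined only for
# documentation purposes and is never called.
def _example_usage():
    # Single build-time variable; None when undefined for this interpreter.
    ext_suffix = get_config_var('EXT_SUFFIX')
    # Install directories for the default scheme, e.g. the 'purelib' path.
    purelib = get_paths()['purelib']
    # Platform tag used for build directories and built distributions.
    plat = get_platform()
    return ext_suffix, purelib, plat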
#!/usr/bin/env python # Copyright 2013 The Swarming Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 that # can be found in the LICENSE file. # pylint: disable=W0212,W0223,W0231,W0613 import base64 import hashlib import json import logging import os import StringIO import sys import tempfile import unittest import urllib import zlib # net_utils adjusts sys.path. import net_utils import auth import isolated_format import isolateserver import test_utils from depot_tools import auto_stub from depot_tools import fix_encoding from utils import file_path from utils import threading_utils import isolateserver_mock CONTENTS = { 'empty_file.txt': '', 'small_file.txt': 'small file\n', # TODO(maruel): symlinks. } class TestCase(net_utils.TestCase): """Mocks out url_open() calls and sys.stdout/stderr.""" _tempdir = None def setUp(self): super(TestCase, self).setUp() self.mock(auth, 'ensure_logged_in', lambda _: None) self.mock(sys, 'stdout', StringIO.StringIO()) self.mock(sys, 'stderr', StringIO.StringIO()) self.old_cwd = os.getcwd() def tearDown(self): try: os.chdir(self.old_cwd) if self._tempdir: file_path.rmtree(self._tempdir) if not self.has_failed(): self.checkOutput('', '') finally: super(TestCase, self).tearDown() @property def tempdir(self): if not self._tempdir: self._tempdir = tempfile.mkdtemp(prefix=u'isolateserver') return self._tempdir def make_tree(self, contents): test_utils.make_tree(self.tempdir, contents) def checkOutput(self, expected_out, expected_err): try: self.assertEqual(expected_err, sys.stderr.getvalue()) self.assertEqual(expected_out, sys.stdout.getvalue()) finally: # Prevent double-fail. self.mock(sys, 'stdout', StringIO.StringIO()) self.mock(sys, 'stderr', StringIO.StringIO()) class TestZipCompression(TestCase): """Test zip_compress and zip_decompress generators.""" def test_compress_and_decompress(self): """Test data === decompress(compress(data)).""" original = [str(x) for x in xrange(0, 1000)] processed = isolateserver.zip_decompress( isolateserver.zip_compress(original)) self.assertEqual(''.join(original), ''.join(processed)) def test_zip_bomb(self): """Verify zip_decompress always returns small chunks.""" original = '\x00' * 100000 bomb = ''.join(isolateserver.zip_compress(original)) decompressed = [] chunk_size = 1000 for chunk in isolateserver.zip_decompress([bomb], chunk_size): self.assertLessEqual(len(chunk), chunk_size) decompressed.append(chunk) self.assertEqual(original, ''.join(decompressed)) def test_bad_zip_file(self): """Verify decompressing broken file raises IOError.""" with self.assertRaises(IOError): ''.join(isolateserver.zip_decompress(['Im not a zip file'])) class FakeItem(isolateserver.Item): def __init__(self, data, high_priority=False): super(FakeItem, self).__init__( isolateserver_mock.hash_content(data), len(data), high_priority) self.data = data def content(self): return [self.data] @property def zipped(self): return zlib.compress(self.data, self.compression_level) class MockedStorageApi(isolateserver.StorageApi): def __init__( self, missing_hashes, push_side_effect=None, namespace='default'): self.missing_hashes = missing_hashes self.push_side_effect = push_side_effect self.push_calls = [] self.contains_calls = [] self._namespace = namespace @property def namespace(self): return self._namespace def push(self, item, push_state, content=None): content = ''.join(item.content() if content is None else content) self.push_calls.append((item, push_state, content)) if self.push_side_effect: 
self.push_side_effect() def contains(self, items): self.contains_calls.append(items) missing = {} for item in items: if item.digest in self.missing_hashes: missing[item] = self.missing_hashes[item.digest] return missing class StorageTest(TestCase): """Tests for Storage methods.""" def assertEqualIgnoringOrder(self, a, b): """Asserts that containers |a| and |b| contain same items.""" self.assertEqual(len(a), len(b)) self.assertEqual(set(a), set(b)) def get_push_state(self, storage, item): missing = list(storage.get_missing_items([item])) self.assertEqual(1, len(missing)) self.assertEqual(item, missing[0][0]) return missing[0][1] def test_batch_items_for_check(self): items = [ isolateserver.Item('foo', 12), isolateserver.Item('blow', 0), isolateserver.Item('bizz', 1222), isolateserver.Item('buzz', 1223), ] expected = [ [items[3], items[2], items[0], items[1]], ] batches = list(isolateserver.batch_items_for_check(items)) self.assertEqual(batches, expected) def test_get_missing_items(self): items = [ isolateserver.Item('foo', 12), isolateserver.Item('blow', 0), isolateserver.Item('bizz', 1222), isolateserver.Item('buzz', 1223), ] missing = { items[2]: 123, items[3]: 456, } storage_api = MockedStorageApi( {item.digest: push_state for item, push_state in missing.iteritems()}) storage = isolateserver.Storage(storage_api) # 'get_missing_items' is a generator yielding pairs, materialize its # result in a dict. result = dict(storage.get_missing_items(items)) self.assertEqual(missing, result) def test_async_push(self): for use_zip in (False, True): item = FakeItem('1234567') storage_api = MockedStorageApi( {item.digest: 'push_state'}, namespace='default-gzip' if use_zip else 'default') storage = isolateserver.Storage(storage_api) channel = threading_utils.TaskChannel() storage.async_push(channel, item, self.get_push_state(storage, item)) # Wait for push to finish. pushed_item = channel.pull() self.assertEqual(item, pushed_item) # StorageApi.push was called with correct arguments. self.assertEqual( [(item, 'push_state', item.zipped if use_zip else item.data)], storage_api.push_calls) def test_async_push_generator_errors(self): class FakeException(Exception): pass def faulty_generator(): yield 'Hi!' raise FakeException('fake exception') for use_zip in (False, True): item = FakeItem('') self.mock(item, 'content', faulty_generator) storage_api = MockedStorageApi( {item.digest: 'push_state'}, namespace='default-gzip' if use_zip else 'default') storage = isolateserver.Storage(storage_api) channel = threading_utils.TaskChannel() storage.async_push(channel, item, self.get_push_state(storage, item)) with self.assertRaises(FakeException): channel.pull() # StorageApi's push should never complete when data can not be read. self.assertEqual(0, len(storage_api.push_calls)) def test_async_push_upload_errors(self): chunk = 'data_chunk' def _generator(): yield chunk def push_side_effect(): raise IOError('Nope') # TODO(vadimsh): Retrying push when fetching data from a generator is # broken now (it reuses same generator instance when retrying). 
content_sources = ( # generator(), lambda: [chunk], ) for use_zip in (False, True): for source in content_sources: item = FakeItem(chunk) self.mock(item, 'content', source) storage_api = MockedStorageApi( {item.digest: 'push_state'}, push_side_effect, namespace='default-gzip' if use_zip else 'default') storage = isolateserver.Storage(storage_api) channel = threading_utils.TaskChannel() storage.async_push(channel, item, self.get_push_state(storage, item)) with self.assertRaises(IOError): channel.pull() # First initial attempt + all retries. attempts = 1 + storage.net_thread_pool.RETRIES # Single push attempt call arguments. expected_push = ( item, 'push_state', item.zipped if use_zip else item.data) # Ensure all pushes are attempted. self.assertEqual( [expected_push] * attempts, storage_api.push_calls) def test_upload_tree(self): files = { u'/a': { 's': 100, 'h': 'hash_a', }, u'/some/dir/b': { 's': 200, 'h': 'hash_b', }, u'/another/dir/c': { 's': 300, 'h': 'hash_c', }, u'/a_copy': { 's': 100, 'h': 'hash_a', }, } files_data = {k: 'x' * files[k]['s'] for k in files} all_hashes = set(f['h'] for f in files.itervalues()) missing_hashes = {'hash_a': 'push a', 'hash_b': 'push b'} # Files read by mocked_file_read. read_calls = [] def mocked_file_read(filepath, chunk_size=0, offset=0): self.assertIn(filepath, files_data) read_calls.append(filepath) return files_data[filepath] self.mock(isolateserver, 'file_read', mocked_file_read) storage_api = MockedStorageApi(missing_hashes) storage = isolateserver.Storage(storage_api) def mock_get_storage(base_url, namespace): self.assertEqual('base_url', base_url) self.assertEqual('some-namespace', namespace) return storage self.mock(isolateserver, 'get_storage', mock_get_storage) isolateserver.upload_tree('base_url', files.iteritems(), 'some-namespace') # Was reading only missing files. self.assertEqualIgnoringOrder( missing_hashes, [files[path]['h'] for path in read_calls]) # 'contains' checked for existence of all files. self.assertEqualIgnoringOrder( all_hashes, [i.digest for i in sum(storage_api.contains_calls, [])]) # Pushed only missing files. self.assertEqualIgnoringOrder( missing_hashes, [call[0].digest for call in storage_api.push_calls]) # Pushing with correct data, size and push state. for pushed_item, push_state, pushed_content in storage_api.push_calls: filenames = [ name for name, metadata in files.iteritems() if metadata['h'] == pushed_item.digest ] # If there are multiple files that map to same hash, upload_tree chooses # a first one. 
filename = filenames[0] self.assertEqual(filename, pushed_item.path) self.assertEqual(files_data[filename], pushed_content) self.assertEqual(missing_hashes[pushed_item.digest], push_state) class IsolateServerStorageApiTest(TestCase): @staticmethod def mock_fetch_request(server, namespace, item, data=None, offset=0): compression = 'flate' if namespace.endswith(('-gzip', '-flate')) else '' if data is None: response = {'url': server + '/some/gs/url/%s/%s' % (namespace, item)} else: response = {'content': base64.b64encode(data[offset:])} return ( server + '/_ah/api/isolateservice/v1/retrieve', { 'data': { 'digest': item, 'namespace': { 'compression': compression, 'digest_hash': 'sha-1', 'namespace': namespace, }, 'offset': offset, }, 'read_timeout': 60, }, response, ) @staticmethod def mock_server_details_request(server): return ( server + '/_ah/api/isolateservice/v1/server_details', {'data': {}}, {'server_version': 'such a good version'} ) @staticmethod def mock_gs_request(server, namespace, item, data=None, offset=0, request_headers=None, response_headers=None): response = data return ( server + '/some/gs/url/%s/%s' % (namespace, item), {}, response, response_headers, ) @staticmethod def mock_contains_request( server, namespace, request, response, compression=''): url = server + '/_ah/api/isolateservice/v1/preupload' digest_collection = dict(request, namespace={ 'compression': compression, 'digest_hash': 'sha-1', 'namespace': namespace, }) return (url, {'data': digest_collection}, response) @staticmethod def mock_upload_request(server, content, ticket, response=None): url = server + '/_ah/api/isolateservice/v1/store_inline' request = {'content': content, 'upload_ticket': ticket} return (url, {'data': request}, response) def test_server_capabilities_success(self): server = 'http://example.com' namespace ='default' self.expected_requests([self.mock_server_details_request(server)]) storage = isolateserver.IsolateServer(server, namespace) caps = storage._server_capabilities self.assertEqual({'server_version': 'such a good version'}, caps) def test_fetch_success(self): server = 'http://example.com' namespace = 'default' data = ''.join(str(x) for x in xrange(1000)) item = isolateserver_mock.hash_content(data) self.expected_requests( [self.mock_fetch_request(server, namespace, item, data)]) storage = isolateserver.IsolateServer(server, namespace) fetched = ''.join(storage.fetch(item)) self.assertEqual(data, fetched) def test_fetch_failure(self): server = 'http://example.com' namespace = 'default' item = isolateserver_mock.hash_content('something') self.expected_requests( [self.mock_fetch_request(server, namespace, item)[:-1] + (None,)]) storage = isolateserver.IsolateServer(server, namespace) with self.assertRaises(IOError): _ = ''.join(storage.fetch(item)) def test_fetch_offset_success(self): server = 'http://example.com' namespace = 'default' data = ''.join(str(x) for x in xrange(1000)) item = isolateserver_mock.hash_content(data) offset = 200 size = len(data) good_content_range_headers = [ 'bytes %d-%d/%d' % (offset, size - 1, size), 'bytes %d-%d/*' % (offset, size - 1), ] for _content_range_header in good_content_range_headers: self.expected_requests([self.mock_fetch_request( server, namespace, item, data, offset=offset)]) storage = isolateserver.IsolateServer(server, namespace) fetched = ''.join(storage.fetch(item, offset)) self.assertEqual(data[offset:], fetched) def test_fetch_offset_bad_header(self): server = 'http://example.com' namespace = 'default' data = ''.join(str(x) for x in 
xrange(1000)) item = isolateserver_mock.hash_content(data) offset = 200 size = len(data) bad_content_range_headers = [ # Missing header. None, '', # Bad format. 'not bytes %d-%d/%d' % (offset, size - 1, size), 'bytes %d-%d' % (offset, size - 1), # Bad offset. 'bytes %d-%d/%d' % (offset - 1, size - 1, size), # Incomplete chunk. 'bytes %d-%d/%d' % (offset, offset + 10, size), ] for content_range_header in bad_content_range_headers: self.expected_requests([ self.mock_fetch_request( server, namespace, item, offset=offset), self.mock_gs_request( server, namespace, item, data, offset=offset, request_headers={'Range': 'bytes=%d-' % offset}, response_headers={'Content-Range': content_range_header}), ]) storage = isolateserver.IsolateServer(server, namespace) with self.assertRaises(IOError): _ = ''.join(storage.fetch(item, offset)) def test_push_success(self): server = 'http://example.com' namespace = 'default' data = ''.join(str(x) for x in xrange(1000)) item = FakeItem(data) contains_request = {'items': [ {'digest': item.digest, 'size': item.size, 'is_isolated': 0}]} contains_response = {'items': [{'index': 0, 'upload_ticket': 'ticket!'}]} requests = [ self.mock_contains_request( server, namespace, contains_request, contains_response), self.mock_upload_request( server, base64.b64encode(data), contains_response['items'][0]['upload_ticket'], {'ok': True}, ), ] self.expected_requests(requests) storage = isolateserver.IsolateServer(server, namespace) missing = storage.contains([item]) self.assertEqual([item], missing.keys()) push_state = missing[item] storage.push(item, push_state, [data]) self.assertTrue(push_state.uploaded) self.assertTrue(push_state.finalized) def test_push_failure_upload(self): server = 'http://example.com' namespace = 'default' data = ''.join(str(x) for x in xrange(1000)) item = FakeItem(data) contains_request = {'items': [ {'digest': item.digest, 'size': item.size, 'is_isolated': 0}]} contains_response = {'items': [{'index': 0, 'upload_ticket': 'ticket!'}]} requests = [ self.mock_contains_request( server, namespace, contains_request, contains_response), self.mock_upload_request( server, base64.b64encode(data), contains_response['items'][0]['upload_ticket'], ), ] self.expected_requests(requests) storage = isolateserver.IsolateServer(server, namespace) missing = storage.contains([item]) self.assertEqual([item], missing.keys()) push_state = missing[item] with self.assertRaises(IOError): storage.push(item, push_state, [data]) self.assertFalse(push_state.uploaded) self.assertFalse(push_state.finalized) def test_push_failure_finalize(self): server = 'http://example.com' namespace = 'default' data = ''.join(str(x) for x in xrange(1000)) item = FakeItem(data) contains_request = {'items': [ {'digest': item.digest, 'size': item.size, 'is_isolated': 0}]} contains_response = {'items': [ {'index': 0, 'gs_upload_url': server + '/content-gs/whatevs/1234', 'upload_ticket': 'ticket!'}]} requests = [ self.mock_contains_request( server, namespace, contains_request, contains_response), ( server + '/content-gs/whatevs/1234', { 'data': data, 'content_type': 'application/octet-stream', 'method': 'PUT', }, '', None, ), ( server + '/_ah/api/isolateservice/v1/finalize_gs_upload', {'data': {'upload_ticket': 'ticket!'}}, None, ), ] self.expected_requests(requests) storage = isolateserver.IsolateServer(server, namespace) missing = storage.contains([item]) self.assertEqual([item], missing.keys()) push_state = missing[item] with self.assertRaises(IOError): storage.push(item, push_state, [data]) 
self.assertTrue(push_state.uploaded) self.assertFalse(push_state.finalized) def test_contains_success(self): server = 'http://example.com' namespace = 'default' files = [ FakeItem('1', high_priority=True), FakeItem('2' * 100), FakeItem('3' * 200), ] request = {'items': [ {'digest': f.digest, 'is_isolated': not i, 'size': f.size} for i, f in enumerate(files)]} response = { 'items': [ {'index': str(i), 'upload_ticket': 'ticket_%d' % i} for i in xrange(3)], } missing = [ files[0], files[1], files[2], ] self._requests = [ self.mock_contains_request(server, namespace, request, response), ] storage = isolateserver.IsolateServer(server, namespace) result = storage.contains(files) self.assertEqual(set(missing), set(result.keys())) for i, (_item, push_state) in enumerate(result.iteritems()): self.assertEqual( push_state.upload_url, '_ah/api/isolateservice/v1/store_inline') self.assertEqual(push_state.finalize_url, None) def test_contains_network_failure(self): server = 'http://example.com' namespace = 'default' self.expected_requests([self.mock_contains_request( server, namespace, {'items': []}, None)]) storage = isolateserver.IsolateServer(server, namespace) with self.assertRaises(isolated_format.MappingError): storage.contains([]) def test_contains_format_failure(self): server = 'http://example.com' namespace = 'default' self.expected_requests([self.mock_contains_request( server, namespace, {'items': []}, None)]) storage = isolateserver.IsolateServer(server, namespace) with self.assertRaises(isolated_format.MappingError): storage.contains([]) class IsolateServerStorageSmokeTest(unittest.TestCase): """Tests public API of Storage class using file system as a store.""" def setUp(self): super(IsolateServerStorageSmokeTest, self).setUp() self.tempdir = tempfile.mkdtemp(prefix=u'isolateserver') self.server = isolateserver_mock.MockIsolateServer() def tearDown(self): try: self.server.close_start() file_path.rmtree(self.tempdir) self.server.close_end() finally: super(IsolateServerStorageSmokeTest, self).tearDown() def run_synchronous_push_test(self, namespace): storage = isolateserver.get_storage(self.server.url, namespace) # Items to upload. items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)] # Storage is empty, all items are missing. missing = dict(storage.get_missing_items(items)) self.assertEqual(set(items), set(missing)) # Push, one by one. for item, push_state in missing.iteritems(): storage.push(item, push_state) # All items are there now. self.assertFalse(dict(storage.get_missing_items(items))) def test_synchronous_push(self): self.run_synchronous_push_test('default') def test_synchronous_push_gzip(self): self.run_synchronous_push_test('default-gzip') def run_upload_items_test(self, namespace): storage = isolateserver.get_storage(self.server.url, namespace) # Items to upload. items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)] # Do it. uploaded = storage.upload_items(items) self.assertEqual(set(items), set(uploaded)) # All items are there now. self.assertFalse(dict(storage.get_missing_items(items))) # Now ensure upload_items skips existing items. more = [isolateserver.BufferItem('more item %d' % i) for i in xrange(10)] # Uploaded only |more|. 
uploaded = storage.upload_items(items + more) self.assertEqual(set(more), set(uploaded)) def test_upload_items(self): self.run_upload_items_test('default') def test_upload_items_gzip(self): self.run_upload_items_test('default-gzip') def run_push_and_fetch_test(self, namespace): storage = isolateserver.get_storage(self.server.url, namespace) # Upload items. items = [isolateserver.BufferItem('item %d' % i) for i in xrange(10)] uploaded = storage.upload_items(items) self.assertEqual(set(items), set(uploaded)) # Fetch them all back into local memory cache. cache = isolateserver.MemoryCache() queue = isolateserver.FetchQueue(storage, cache) # Start fetching. pending = set() for item in items: pending.add(item.digest) queue.add(item.digest) # Wait for fetch to complete. while pending: fetched = queue.wait(pending) pending.discard(fetched) # Ensure fetched same data as was pushed. self.assertEqual( [i.buffer for i in items], [cache.read(i.digest) for i in items]) def test_push_and_fetch(self): self.run_push_and_fetch_test('default') def test_push_and_fetch_gzip(self): self.run_push_and_fetch_test('default-gzip') if sys.maxsize == (2**31) - 1: def test_archive_multiple_huge_file(self): self.server.discard_content() # Create multiple files over 2.5gb. This test exists to stress the virtual # address space on 32 bits systems. Make real files since it wouldn't fit # memory by definition. # Sadly, this makes this test very slow so it's only run on 32 bits # platform, since it's known to work on 64 bits platforms anyway. # # It's a fairly slow test, well over 15 seconds. files = {} size = 512 * 1024 * 1024 for i in xrange(5): name = '512mb_%d.%s' % (i, isolateserver.ALREADY_COMPRESSED_TYPES[0]) p = os.path.join(self.tempdir, name) with open(p, 'wb') as f: # Write 512mb. h = hashlib.sha1() data = os.urandom(1024) for _ in xrange(size / 1024): f.write(data) h.update(data) os.chmod(p, 0600) files[p] = { 'h': h.hexdigest(), 'm': 0600, 's': size, } if sys.platform == 'win32': files[p].pop('m') # upload_tree() is a thin wrapper around Storage. isolateserver.upload_tree(self.server.url, files.items(), 'testing') expected = {'testing': {f['h']: '<skipped>' for f in files.itervalues()}} self.assertEqual(expected, self.server.contents) class IsolateServerDownloadTest(TestCase): def _url_read_json(self, url, **kwargs): """Current _url_read_json mock doesn't respect identical URLs.""" logging.warn('url_read_json(%s, %s)', url[:500], str(kwargs)[:500]) with self._lock: if not self._requests: return None if not self._flagged_requests: self._flagged_requests = [0 for _element in self._requests] # Ignore 'stream' argument, it's not important for these tests. kwargs.pop('stream', None) for i, (new_url, expected_kwargs, result) in enumerate(self._requests): if new_url == url and expected_kwargs == kwargs: self._flagged_requests[i] = 1 return result self.fail('Unknown request %s' % url) def setUp(self): super(IsolateServerDownloadTest, self).setUp() self._flagged_requests = [] def tearDown(self): if all(self._flagged_requests): self._requests = [] super(IsolateServerDownloadTest, self).tearDown() def test_download_two_files(self): # Test downloading two files. 
actual = {} def out(key, generator): actual[key] = ''.join(generator) self.mock(isolateserver, 'file_write', out) server = 'http://example.com' requests = [ ( server + '/_ah/api/isolateservice/v1/retrieve', { 'data': { 'digest': h.encode('utf-8'), 'namespace': { 'namespace': 'default-gzip', 'digest_hash': 'sha-1', 'compression': 'flate', }, 'offset': 0, }, 'read_timeout': 60, }, {'content': base64.b64encode(zlib.compress(v))}, ) for h, v in [('sha-1', 'Coucou'), ('sha-2', 'Bye Bye')] ] self.expected_requests(requests) cmd = [ 'download', '--isolate-server', server, '--target', net_utils.ROOT_DIR, '--file', 'sha-1', 'path/to/a', '--file', 'sha-2', 'path/to/b', ] self.assertEqual(0, isolateserver.main(cmd)) expected = { os.path.join(net_utils.ROOT_DIR, 'path/to/a'): 'Coucou', os.path.join(net_utils.ROOT_DIR, 'path/to/b'): 'Bye Bye', } self.assertEqual(expected, actual) def test_download_isolated(self): # Test downloading an isolated tree. actual = {} def file_write_mock(key, generator): actual[key] = ''.join(generator) self.mock(isolateserver, 'file_write', file_write_mock) self.mock(os, 'makedirs', lambda _: None) server = 'http://example.com' files = { os.path.join('a', 'foo'): 'Content', 'b': 'More content', } isolated = { 'command': ['Absurb', 'command'], 'relative_cwd': 'a', 'files': dict( (k, {'h': isolateserver_mock.hash_content(v), 's': len(v)}) for k, v in files.iteritems()), 'version': isolated_format.ISOLATED_FILE_VERSION, } isolated_data = json.dumps(isolated, sort_keys=True, separators=(',',':')) isolated_hash = isolateserver_mock.hash_content(isolated_data) requests = [(v['h'], files[k]) for k, v in isolated['files'].iteritems()] requests.append((isolated_hash, isolated_data)) requests = [ ( server + '/_ah/api/isolateservice/v1/retrieve', { 'data': { 'digest': h.encode('utf-8'), 'namespace': { 'namespace': 'default-gzip', 'digest_hash': 'sha-1', 'compression': 'flate', }, 'offset': 0, }, 'read_timeout': 60, }, {'content': base64.b64encode(zlib.compress(v))}, ) for h, v in requests ] cmd = [ 'download', '--isolate-server', server, '--target', self.tempdir, '--isolated', isolated_hash, ] self.expected_requests(requests) self.assertEqual(0, isolateserver.main(cmd)) expected = dict( (os.path.join(self.tempdir, k), v) for k, v in files.iteritems()) self.assertEqual(expected, actual) expected_stdout = ( 'To run this test please run from the directory %s:\n Absurb command\n' % os.path.join(self.tempdir, 'a')) self.checkOutput(expected_stdout, '') def get_storage(_isolate_server, namespace): class StorageFake(object): def __enter__(self, *_): return self def __exit__(self, *_): pass @property def hash_algo(self): # pylint: disable=R0201 return isolated_format.get_hash_algo(namespace) @staticmethod def upload_items(items): # Always returns the second item as not present. return [items[1]] return StorageFake() class TestArchive(TestCase): @staticmethod def get_isolateserver_prog(): """Returns 'isolateserver.py' or 'isolateserver.pyc'.""" return os.path.basename(sys.modules[isolateserver.__name__].__file__) def test_archive_no_server(self): with self.assertRaises(SystemExit): isolateserver.main(['archive', '.']) prog = self.get_isolateserver_prog() self.checkOutput( '', 'Usage: %(prog)s archive [options] <file1..fileN> or - to read ' 'from stdin\n\n' '%(prog)s: error: --isolate-server is required.\n' % {'prog': prog}) def test_archive_duplicates(self): with self.assertRaises(SystemExit): isolateserver.main( [ 'archive', '--isolate-server', 'https://localhost:1', # Effective dupes. 
'.', os.getcwd(), ]) prog = self.get_isolateserver_prog() self.checkOutput( '', 'Usage: %(prog)s archive [options] <file1..fileN> or - to read ' 'from stdin\n\n' '%(prog)s: error: Duplicate entries found.\n' % {'prog': prog}) def test_archive_files(self): self.mock(isolateserver, 'get_storage', get_storage) self.make_tree(CONTENTS) f = ['empty_file.txt', 'small_file.txt'] os.chdir(self.tempdir) isolateserver.main( ['archive', '--isolate-server', 'https://localhost:1'] + f) self.checkOutput( 'da39a3ee5e6b4b0d3255bfef95601890afd80709 empty_file.txt\n' '0491bd1da8087ad10fcdd7c9634e308804b72158 small_file.txt\n', '') def help_test_archive(self, cmd_line_prefix): self.mock(isolateserver, 'get_storage', get_storage) self.make_tree(CONTENTS) isolateserver.main(cmd_line_prefix + [self.tempdir]) # If you modify isolated_format.ISOLATED_FILE_VERSION, you'll have to update # the hash below. Sorry about that but this ensures the .isolated format is # stable. isolated = { 'algo': 'sha-1', 'files': {}, 'version': isolated_format.ISOLATED_FILE_VERSION, } for k, v in CONTENTS.iteritems(): isolated['files'][k] = { 'h': isolateserver_mock.hash_content(v), 's': len(v), } if sys.platform != 'win32': isolated['files'][k]['m'] = 0600 isolated_data = json.dumps(isolated, sort_keys=True, separators=(',',':')) isolated_hash = isolateserver_mock.hash_content(isolated_data) self.checkOutput( '%s %s\n' % (isolated_hash, self.tempdir), '') def test_archive_directory(self): self.help_test_archive(['archive', '-I', 'https://localhost:1']) def test_archive_directory_envvar(self): with test_utils.EnvVars({'ISOLATE_SERVER': 'https://localhost:1'}): self.help_test_archive(['archive']) def clear_env_vars(): for e in ('ISOLATE_DEBUG', 'ISOLATE_SERVER'): os.environ.pop(e, None) if __name__ == '__main__': fix_encoding.fix_encoding() if '-v' in sys.argv: unittest.TestCase.maxDiff = None logging.basicConfig( level=(logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)) clear_env_vars() unittest.main()
""" Utilities for validating inputs to user-facing API functions. """ from textwrap import dedent from functools import wraps from inspect import getargspec from uuid import uuid4 from six import iteritems, viewkeys, exec_ from toolz import valmap NO_DEFAULT = object() def expect_types(*_pos, **named): """ Preprocessing decorator that verifies inputs have expected types. Usage ----- >>> @expect_types(x=int, y=str) ... def foo(x, y): ... return x, y ... >>> foo(2, '3') (2, '3') >>> foo(2.0, '3') Traceback (most recent call last): ... TypeError: foo() expected an argument of type 'int' for argument 'x', but got float instead. # noqa """ if _pos: raise TypeError("expect_types() only takes keyword arguments.") for name, type_ in iteritems(named): if not isinstance(type_, (type, tuple)): raise TypeError( "expect_types() expected a type or tuple of types for " "argument '{name}', but got {type_} instead.".format( name=name, type_=type_, ) ) return preprocess(**valmap(_expect_type, named)) def preprocess(*_unused, **processors): """ Decorator that applies pre-processors to the arguments of a function before calling the function. Parameters ---------- **processors : dict Map from argument name -> processor function. A processor function takes three arguments: (func, argname, argvalue). `func` is the the function for which we're processing args. `argname` is the name of the argument we're processing. `argvalue` is the value of the argument we're processing. Usage ----- >>> def _ensure_tuple(func, argname, arg): ... if isinstance(arg, tuple): ... return argvalue ... try: ... return tuple(arg) ... except TypeError: ... raise TypeError( ... "%s() expected argument '%s' to" ... " be iterable, but got %s instead." % ( ... func.__name__, argname, arg, ... ) ... ) ... >>> @preprocess(arg=_ensure_tuple) ... def foo(arg): ... return arg ... >>> foo([1, 2, 3]) (1, 2, 3) >>> foo("a") ('a',) >>> foo(2) Traceback (most recent call last): ... TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead. """ if _unused: raise TypeError("preprocess() doesn't accept positional arguments") def _decorator(f): args, varargs, varkw, defaults = argspec = getargspec(f) if defaults is None: defaults = () no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults)) args_defaults = zip(args, no_defaults + defaults) argset = set(args) # These assumptions simplify the implementation significantly. If you # really want to validate a *args/**kwargs function, you'll have to # implement this here or do it yourself. if varargs: raise TypeError( "Can't validate functions that take *args: %s" % argspec ) if varkw: raise TypeError( "Can't validate functions that take **kwargs: %s" % argspec ) # Arguments can be declared as tuples in Python 2. if not all(isinstance(arg, str) for arg in args): raise TypeError( "Can't validate functions using tuple unpacking: %s" % argspec ) # Ensure that all processors map to valid names. bad_names = viewkeys(processors) - argset if bad_names: raise TypeError( "Got processors for unknown arguments: %s." % bad_names ) return _build_preprocessed_function(f, processors, args_defaults) return _decorator def call(f): """ Wrap a function in a processor that calls `f` on the argument before passing it along. Useful for creating simple arguments to the `@preprocess` decorator. Parameters ---------- f : function Function accepting a single argument and returning a replacement. Usage ----- >>> @preprocess(x=call(lambda x: x + 1)) ... def foo(x): ... return x ... 
>>> foo(1) 2 """ @wraps(f) def processor(func, argname, arg): return f(arg) return processor def _qualified_name(obj): """ Return the fully-qualified name (ignoring inner classes) of a type. """ module = obj.__module__ if module in ('__builtin__', '__main__', 'builtins'): return obj.__name__ return '.'.join([module, obj.__name__]) def _expect_type(type_): """ Factory for type-checking functions that work the @preprocess decorator. """ # Slightly different messages for type and tuple of types. _template = ( "{{funcname}}() expected a value of type {type_or_types} " "for argument '{{argname}}', but got {{actual}} instead." ) if isinstance(type_, tuple): template = _template.format( type_or_types=' or '.join(map(_qualified_name, type_)) ) else: template = _template.format(type_or_types=_qualified_name(type_)) def _check_type(func, argname, argvalue): if not isinstance(argvalue, type_): raise TypeError( template.format( funcname=_qualified_name(func), argname=argname, actual=_qualified_name(type(argvalue)), ) ) return argvalue return _check_type def optional(type_): """ Helper for use with `expect_types` when an input can be `type_` or `None`. Returns an object such that both `None` and instances of `type_` pass checks of the form `isinstance(obj, optional(type_))`. Parameters ---------- type_ : type Type for which to produce an option. Examples -------- >>> isinstance({}, optional(dict)) True >>> isinstance(None, optional(dict)) True >>> isinstance(1, optional(dict)) False """ return (type_, type(None)) def _build_preprocessed_function(func, processors, args_defaults): """ Build a preprocessed function with the same signature as `func`. Uses `exec` internally to build a function that actually has the same signature as `func. """ format_kwargs = {'func_name': func.__name__} def mangle(name): return 'a' + uuid4().hex + name format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__) def make_processor_assignment(arg, processor_name): template = "{arg} = {processor}({func}, '{arg}', {arg})" return template.format( arg=arg, processor=processor_name, func=mangled_funcname, ) exec_globals = {mangled_funcname: func, 'wraps': wraps} defaults_seen = 0 default_name_template = 'a' + uuid4().hex + '_%d' signature = [] call_args = [] assignments = [] for arg, default in args_defaults: if default is NO_DEFAULT: signature.append(arg) else: default_name = default_name_template % defaults_seen exec_globals[default_name] = default signature.append('='.join([arg, default_name])) defaults_seen += 1 if arg in processors: procname = mangle('_processor_' + arg) exec_globals[procname] = processors[arg] assignments.append(make_processor_assignment(arg, procname)) call_args.append(arg + '=' + arg) exec_str = dedent( """ @wraps({wrapped_funcname}) def {func_name}({signature}): {assignments} return {wrapped_funcname}({call_args}) """ ).format( func_name=func.__name__, signature=', '.join(signature), assignments='\n '.join(assignments), wrapped_funcname=mangled_funcname, call_args=', '.join(call_args), ) compiled = compile( exec_str, func.__code__.co_filename, mode='exec', ) exec_locals = {} exec_(compiled, exec_globals, exec_locals) return exec_locals[func.__name__]
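# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical function, not part of this module): how
# ``expect_types`` and ``optional`` defined above are meant to be combined.
def _example_expect_types():
    @expect_types(label=str, limit=optional(int))
    def head(label, limit=None):
        return label, limit

    assert head('prices') == ('prices', None)
    assert head('prices', limit=3) == ('prices', 3)
    # head('prices', limit='3') would raise a TypeError naming the offending
    # argument, since '3' is neither an int nor None.
    return head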
# -*- coding: utf-8 -*- # Copyright (c) 2015 Ericsson AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from twisted.protocols import basic from twisted.internet import reactor from twisted.internet.protocol import Protocol, ClientFactory from twisted.internet.protocol import DatagramProtocol from calvin.utilities.calvinlogger import get_logger from calvin.utilities.calvin_callback import CalvinCBClass _log = get_logger(__name__) from calvin.utilities import calvinconfig _conf = calvinconfig.get() class DummyError(object): def __init__(self, str_): self._str = str_ def getErrorMessage(self): return self._str class UDPRawProtocol(CalvinCBClass, DatagramProtocol): def __init__(self, callbacks=None, **kwargs): super(UDPRawProtocol, self).__init__(callbacks) self.host = kwargs.pop('host', '') self.port = kwargs.pop('port', 0) self.factory = kwargs.pop('factory', None) def startProtocol(self): self.transport.connect(self.host, self.port) def stopProtocol(self): "Called after all transport is teared down" self.factory.clientConnectionLost(None, DummyError("disconnected")) def datagramReceived(self, data, (host, port)): self._callback_execute('data_received', data) def send(self, data): self.transport.write(data, (self.host, self.port)) class RawProtocol(CalvinCBClass, Protocol): def __init__(self, callbacks=None, **kwargs): super(RawProtocol, self).__init__(callbacks) self.host = kwargs.pop('host', '') self.port = kwargs.pop('port', 0) def dataReceived(self, data): self._callback_execute('data_received', data) def send(self, data): self.transport.write(data) def close(self): self.transport.loseConnection() class StringRecieverProtocol(CalvinCBClass, basic.Int16StringReceiver): def __init__(self, callbacks=None, **kwargs): super(StringRecieverProtocol, self).__init__(callbacks) self.host = kwargs.pop('host', '') self.port = kwargs.pop('port', 0) def stringReceived(self, data): self._callback_execute('data_received', data) class DelimiterProtocol(CalvinCBClass, basic.LineReceiver): def __init__(self, callbacks=None, **kwargs): self.delimiter = kwargs.pop('delimiter', '\r\n') self.host = kwargs.pop('host', '') self.port = kwargs.pop('port', 0) super(DelimiterProtocol, self).__init__(callbacks) def lineReceived(self, data): self._callback_execute('data_received', data) class BaseClientProtocolFactory(CalvinCBClass, ClientFactory): def __init__(self, callbacks=None): super(BaseClientProtocolFactory, self).__init__(callbacks) self._callbacks = callbacks self._addr = "" self._port = 0 self._delimiter = None self._connector = None self.protocol = None self._protocol_factory = None def startedConnecting(self, connector): pass def buildProtocol(self, addr): if not self._protocol_factory: raise Exception("No protocol factory set!") self.protocol = self._protocol_factory({'data_received': self._callbacks['data_received']}, delimiter=self._delimiter, host=self._addr, port=self._port, factory=self) reactor.callLater(0, self._callback_execute, 'connected', addr) return self.protocol def disconnect(self): if self._connector: 
            # TODO: returns defered ?!?
            self._connector.disconnect()
            self._connector = None
            self.protocol = None

    def send(self, data):
        self.protocol.send(data)

    def clientConnectionLost(self, connector, reason):
        self._callback_execute('connection_lost',
                               (self._addr, self._port), reason.getErrorMessage())

    # TODO: returns defered ?!?
    def clientConnectionFailed(self, connector, reason):
        self._callback_execute('connection_failed',
                               (self._addr, self._port), reason.getErrorMessage())


class UDPClientProtocolFactory(BaseClientProtocolFactory):
    def __init__(self, callbacks=None):
        super(UDPClientProtocolFactory, self).__init__(callbacks)
        self._addr = ""
        self._port = 0
        self._protocol_factory = UDPRawProtocol

    def connect(self, addr, port):
        self._addr = addr
        self._port = port
        self._connector = reactor.listenUDP(0, self.buildProtocol((addr, port)))
        return self._connector


class TCPClientProtocolFactory(BaseClientProtocolFactory):
    def __init__(self, mode, delimiter="\r\n", node_name=None,
                 server_node_name=None, callbacks=None):
        super(TCPClientProtocolFactory, self).__init__(callbacks)
        self._protocol_factory = None
        self._protocol_type = mode
        self.protocol = None
        self._connector = None
        self._delimiter = delimiter
        self._addr = ""
        self._port = 0
        self._server_node_name = server_node_name

        if mode == "raw":
            self._protocol_factory = RawProtocol
        elif mode == "string":
            self._protocol_factory = StringRecieverProtocol
        elif mode == "delimiter":
            self._protocol_factory = DelimiterProtocol
        else:
            _log.error("Trying to use non-existing protocol %s!" % (mode, ))
            raise Exception("Trying to use non-existing protocol %s!" % (mode, ))

    def connect(self, addr, port):
        self._addr = addr
        self._port = port
        control_interface_security = _conf.get("security", "control_interface_security")
        if control_interface_security == "tls":
            # Imports needed only for the TLS path.
            from calvin.utilities import certificate
            from twisted.internet import ssl, endpoints
            try:
                ca_cert_list_str, ca_cert_list_x509, truststore = \
                    certificate.get_truststore(certificate.TRUSTSTORE_TRANSPORT)
                twisted_trusted_ca_cert = ssl.Certificate.loadPEM(ca_cert_list_str[0])
                self.options = ssl.optionsForClientTLS(self._server_node_name,
                                                       twisted_trusted_ca_cert)
            except Exception as err:
                _log.error("Failed to fetch client credentials, err={}".format(err))
                raise
            try:
                endpoint = endpoints.SSL4ClientEndpoint(reactor, addr, int(port), self.options)
            except:
                _log.error("Client failed connectSSL")
                raise
            try:
                endpoint.connect(self)
            except Exception as e:
                _log.error("Failed endpoint.connect, e={}".format(e))
                raise
        else:
            return reactor.connectTCP(addr, port, self)

    def send(self, data):
        if self._protocol_type == "raw":
            self.protocol.send(data)
        elif self._protocol_type == "string":
            self.protocol.sendString(data)
        elif self._protocol_type == "delimiter":
            self.protocol.sendLine(data)
        else:
            _log.error("Trying to use non-existing protocol %s!" % self._protocol_type)
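# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not part of Calvin): the expected
# call sequence for the factory above.  ``callbacks`` is assumed to already be
# in whatever format CalvinCBClass expects (see calvin_callback); only the
# 'data_received', 'connected', 'connection_lost' and 'connection_failed'
# keys referenced by this module are relevant here.
def _example_tcp_client(callbacks, addr, port):
    factory = TCPClientProtocolFactory("delimiter", delimiter='\n', callbacks=callbacks)
    factory.connect(addr, port)   # non-TLS path: reactor.connectTCP(addr, port, factory)
    # Once the 'connected' callback has fired, data can be sent and the
    # connection torn down again:
    #   factory.send("hello")     # framed with the configured delimiter
    #   factory.disconnect()
    return factory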
#!/usr/bin/env python3 # # Copyright (c) 2017-2018 Weitian LI <weitian@aaronly.me> # MIT License # """ Convert a FITS image to OSKAR sky model for simulation usage. NOTE ---- The OSKAR sky model consists of all the valid pixels (with absolute values within the specified minimum and maximum thresholds) from the given image (i.e., slice at a frequency channel), and fluxes are given in unit [Jy], therefore, the input image should be converted from brightness temperature [K] to unit [Jy/pixel]. References ---------- [1] GitHub: OxfordSKA/OSKAR https://github.com/OxfordSKA/OSKAR [2] OSKAR - Sky Model http://www.oerc.ox.ac.uk/~ska/oskar2/OSKAR-Sky-Model.pdf [3] OSKAR - Settings http://www.oerc.ox.ac.uk/~ska/oskar2/OSKAR-Settings.pdf """ import os import sys import argparse import logging from datetime import datetime import numpy as np import astropy.io.fits as fits import astropy.units as au from astropy.wcs import WCS logging.basicConfig(level=logging.INFO, format="[%(levelname)s:%(lineno)d] %(message)s") logger = logging.getLogger() class SkyModel: """ OSKAR sky model. Parameters ---------- image : 2D float `~numpy.ndarray` Input image array; unit [K] (brightness temperature) freq : float Frequency of the input image slice; unit [MHz] pixelsize : float Pixel size of the input image; Unit: [arcsec] ra0, dec0 : float The coordinate of the image center; unit [deg] minvalue : float, optional The minimum threshold for the image absolute values maxvalue : float, optional The maximum threshold for the image absolute values mask : 2D bool `~numpy.ndarray`, optional Use this mask to select the sources of the output sky model, instead of the above ``minvalue`` and ``maxvalue``. NOTE: Will overwrite the above ``minvalue`` and ``maxvalue``. projection : str, optional The WCS projection for the image; Default: "CAR" (Cartesian) TODO: support "SIN" etc. """ def __init__(self, image, freq, pixelsize, ra0, dec0, minvalue=1e-4, maxvalue=np.inf, mask=None, projection="CAR"): self.image = image # [K] (brightness temperature) self.freq = freq # [MHz] self.pixelsize = pixelsize # [arcsec] self.ra0 = ra0 # [deg] self.dec0 = dec0 # [deg] self.minvalue = minvalue self.maxvalue = maxvalue self.mask = mask self.projection = projection logger.info("SkyModel: Loaded image @ %.2f [MHz], " % freq + "%.1f [arcsec/pixel]" % pixelsize) logger.info("Image size: %dx%d" % self.shape) logger.info("FoV size: %.2fx%.2f [deg^2]" % self.fov) @property def shape(self): """ FITS image (width, height) """ width, height = list(reversed(self.image.shape))[:2] return (width, height) @property def fov(self): """ FITS image FoV size: (width, height) [deg] """ width, height = self.shape return (width*self.pixelsize/3600, height*self.pixelsize/3600) @property def wcs(self): """ WCS for the given image slice. """ shape = self.image.shape delta = self.pixelsize / 3600.0 # [arcsec] -> [deg] wcs_ = WCS(naxis=2) wcs_.wcs.ctype = ["RA---"+self.projection, "DEC--"+self.projection] wcs_.wcs.crval = np.array([self.ra0, self.dec0]) wcs_.wcs.crpix = np.array([shape[1], shape[0]]) / 2.0 + 1 wcs_.wcs.cdelt = np.array([-delta, delta]) # NOTE the minus sign return wcs_ @property def fits_header(self): header = self.wcs.to_header() header["BUNIT"] = ("Jy/pixel", "Brightness unit") header["FREQ"] = (self.freq, "Frequency [MHz]") header["RA0"] = (self.ra0, "Center R.A. [deg]") header["DEC0"] = (self.dec0, "Center Dec. 
[deg]") header["PixSize"] = (self.pixelsize, "Pixel size [arcsec]") header["K2JyPix"] = (self.factor_K2JyPixel, "[K] -> [Jy/pixel]") header["MINVALUE"] = (self.minvalue, "[K] minimum threshold") if np.isfinite(self.maxvalue): header["MAXVALUE"] = (self.maxvalue, "[K] maximum threshold") return header @property def factor_K2JyPixel(self): """ Conversion factor from [K] to [Jy/pixel] """ pixarea = (self.pixelsize * au.arcsec) ** 2 freq = self.freq * au.MHz equiv = au.brightness_temperature(pixarea, freq) factor = au.K.to(au.Jy, equivalencies=equiv) return factor @property def ra_dec(self): """ Calculate the (ra, dec) of each image pixel using the above WCS. NOTE: axis ordering difference between numpy array and FITS """ shape = self.image.shape wcs = self.wcs x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0])) pix = np.column_stack([x.flatten(), y.flatten()]) world = wcs.wcs_pix2world(pix, 0) ra = world[:, 0].reshape(shape) dec = world[:, 1].reshape(shape) return (ra, dec) @property def mask(self): if self._mask is None: self._mask = ((np.abs(self.image) >= self.minvalue) & (np.abs(self.image) <= self.maxvalue)) logger.info("Use minimum and maximum thresholds: [%.4e, %.4e]" % (self.minvalue, self.maxvalue)) return self._mask @mask.setter def mask(self, value): if (value is not None) and (value.shape != self.image.shape): raise ValueError("mask shape does match image!") self._mask = value @property def sky(self): """ OSKAR sky model array converted from the input image. Columns ------- ra : (J2000) right ascension (deg) dec : (J2000) declination (deg) flux : source (Stokes I) flux density (Jy) """ idx = self.mask.flatten() ra, dec = self.ra_dec ra = ra.flatten()[idx] dec = dec.flatten()[idx] flux = self.image.flatten()[idx] * self.factor_K2JyPixel sky_ = np.column_stack([ra, dec, flux]) return sky_ def write_sky_model(self, outfile, clobber=False): """ Write the converted sky model for simulation. 
""" if os.path.exists(outfile) and (not clobber): raise OSError("OSKAR sky model file already exists: %s" % outfile) sky = self.sky counts = sky.shape[0] percent = 100 * counts / self.image.size logger.info("Source counts: %d (%.1f%%)" % (counts, percent)) header = ("Frequency = %.3f [MHz]\n" % self.freq + "Pixel size = %.2f [arcsec]\n" % self.pixelsize + "K2JyPixel = %.3e\n" % self.factor_K2JyPixel + "RA0 = %.4f [deg]\n" % self.ra0 + "Dec0 = %.4f [deg]\n" % self.dec0 + "Minimum value = %.4e [K]\n" % self.minvalue + "Maximum value = %.4e [K]\n" % self.maxvalue + "Source counts = %d (%.1f%%)\n\n" % (counts, percent) + "R.A.[deg] Dec.[deg] flux[Jy]") logger.info("Writing sky model ...") np.savetxt(outfile, sky, fmt='%.10e, %.10e, %.10e', header=header) logger.info("Wrote OSKAR sky model to file: %s" % outfile) def write_fits(self, outfile, oldheader=None, clobber=False): if os.path.exists(outfile) and (not clobber): raise OSError("Sky FITS already exists: %s" % outfile) if oldheader is not None: header = oldheader header.extend(self.fits_header, update=True) else: header = self.fits_header header.add_history(datetime.now().isoformat()) header.add_history(" ".join(sys.argv)) image = self.image.copy() image[~self.mask] = np.nan image *= self.factor_K2JyPixel hdu = fits.PrimaryHDU(data=image, header=header) try: hdu.writeto(outfile, overwrite=True) except TypeError: hdu.writeto(outfile, clobber=True) # old astropy versions logger.info("Wrote FITS image of sky model to file: %s" % outfile) def write_mask(self, outfile, clobber=False): if os.path.exists(outfile) and (not clobber): raise OSError("Sky mask already exists: %s" % outfile) header = self.fits_header header.add_history(datetime.now().isoformat()) header.add_history(" ".join(sys.argv)) hdu = fits.PrimaryHDU(data=self.mask.astype(np.int16), header=header) try: hdu.writeto(outfile, overwrite=True) except TypeError: hdu.writeto(outfile, clobber=True) # old astropy versions logger.info("Wrote mask of sky model to file: %s" % outfile) def main(): parser = argparse.ArgumentParser( description="Convert FITS image to OSKAR sky model") parser.add_argument("-C", "--clobber", dest="clobber", action="store_true", help="overwrite existing file") parser.add_argument("-r", "--ra0", dest="ra0", type=float, default=0.0, help="[deg] R.A. of the image center (default: 0)") parser.add_argument("-d", "--dec0", dest="dec0", type=float, default=-27.0, help="[deg] Dec. 
of the image center (default: -27)") parser.add_argument("-p", "--pixel-size", dest="pixelsize", type=float, help="image pixel size [arcsec]; " + "(default: obtain from the FITS header 'PixSize')") parser.add_argument("-f", "--freq", dest="freq", type=float, help="frequency [MHz] the image measured; " + "(default: obtain from the FITS header 'FREQ')") parser.add_argument("-m", "--min-value", dest="minvalue", type=float, default=1e-4, help="[K] minimum threshold to the output sky model " + "(default: 1e-4, i.e., 0.1 mK)") parser.add_argument("-M", "--max-value", dest="maxvalue", type=float, default=np.inf, help="[K] maximum threshold to the output sky model " + "(default: inf)") parser.add_argument("--mask", dest="mask", help="use a mask to determine the output sky model " + "(NOTE: will override --min-value and --max-value)") parser.add_argument("-F", "--osm-fits", dest="osmfits", action="store_true", help="save a FITS version of the converted sky model") parser.add_argument("-o", "--outdir", dest="outdir", help="output directory for sky model files " + "(default: current working directory)") parser.add_argument("--create-mask", dest="create_mask", help="create a FITS mask for the output sky model") parser.add_argument("infile", help="input FITS image") parser.add_argument("outfile", nargs="?", help="output OSKAR sky model (default: " + "same basename as the input FITS image)") args = parser.parse_args() if args.outfile: outfile = args.outfile else: outfile = os.path.splitext(os.path.basename(args.infile))[0] + ".osm" if args.outdir: outfile = os.path.join(args.outdir, outfile) if not os.path.exists(args.outdir): os.mkdir(args.outdir) with fits.open(args.infile) as f: image = f[0].data.astype(np.float32) header = f[0].header.copy(strip=True) logger.info("Read input FITS image: %s" % args.infile) # Check data unit unit = header.get("BUNIT") if unit is None: logger.warning("Input FITS file of unknown data unit! " + "Assuming [K] (kelvin)!") elif unit.upper() not in ["K", "KELVIN"]: logger.error("Input FITS file of wrong data unit: %s" % unit) freq = args.freq if args.freq else header["FREQ"] # [MHz] if args.pixelsize: pixelsize = args.pixelsize # [arcsec] else: pixelsize = header["PixSize"] # [arcsec] logger.info("Frequency: %.2f [MHz]" % freq) logger.info("Pixel size: %.2f [arcsec]" % pixelsize) if args.mask: mask = fits.open(args.mask)[0].data.astype(np.bool) logger.info("Loaded sky mask from file: %s" % args.mask) else: mask = None logger.info("Threshold: %g - %g [K]" % (args.minvalue, args.maxvalue)) skymodel = SkyModel(image=image, freq=freq, ra0=args.ra0, dec0=args.dec0, pixelsize=pixelsize, minvalue=args.minvalue, maxvalue=args.maxvalue, mask=mask) logger.info("Conversion [K] -> [Jy/pixel]: %g" % skymodel.factor_K2JyPixel) skymodel.write_sky_model(outfile, clobber=args.clobber) if args.osmfits: outfits = outfile + ".fits" skymodel.write_fits(outfits, oldheader=header, clobber=args.clobber) if args.create_mask: skymodel.write_mask(args.create_mask, clobber=args.clobber) if __name__ == "__main__": main()
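# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical values, not part of the converter): how the
# [K] -> [Jy/pixel] factor used by ``SkyModel.factor_K2JyPixel`` above is
# obtained, here for an assumed 20-arcsec pixel at 120 MHz.
def _example_k2jypixel(pixelsize=20.0, freq=120.0):
    pixarea = (pixelsize * au.arcsec) ** 2               # pixel solid angle
    equiv = au.brightness_temperature(pixarea, freq * au.MHz)
    return au.K.to(au.Jy, equivalencies=equiv)           # [Jy/pixel] per [K]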
# -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Perspective Broker

    "This isn't a professional opinion, but it's probably got enough
    internet to kill you." --glyph

Introduction
============

This is a broker for proxies for and copies of objects.  It provides a
translucent interface layer to those proxies.

The protocol is not opaque, because it provides objects which represent the
remote proxies and require no context (server references, IDs) to operate on.

It is not transparent because it does I{not} attempt to make remote objects
behave identically, or even similarly, to local objects.  Method calls are
invoked asynchronously, and specific rules are applied when serializing
arguments.

To get started, begin with L{PBClientFactory} and L{PBServerFactory}.

@author: Glyph Lefkowitz
"""

import random
import types

from hashlib import md5

from zope.interface import implements, Interface

# Twisted Imports
from twisted.python import log, failure, reflect
from twisted.internet import defer, protocol
from twisted.cred.portal import Portal
from twisted.cred.credentials import IAnonymous, ICredentials
from twisted.cred.credentials import IUsernameHashedPassword, Anonymous
from twisted.persisted import styles
from twisted.python.components import registerAdapter

from twisted.spread.interfaces import IJellyable, IUnjellyable
from twisted.spread.jelly import jelly, unjelly, globalSecurity
from twisted.spread import banana

from twisted.spread.flavors import Serializable
from twisted.spread.flavors import Referenceable, NoSuchMethod
from twisted.spread.flavors import Root, IPBRoot
from twisted.spread.flavors import ViewPoint
from twisted.spread.flavors import Viewable
from twisted.spread.flavors import Copyable
from twisted.spread.flavors import Jellyable
from twisted.spread.flavors import Cacheable
from twisted.spread.flavors import RemoteCopy
from twisted.spread.flavors import RemoteCache
from twisted.spread.flavors import RemoteCacheObserver
from twisted.spread.flavors import copyTags

from twisted.spread.flavors import setUnjellyableForClass
from twisted.spread.flavors import setUnjellyableFactoryForClass
from twisted.spread.flavors import setUnjellyableForClassTree

# These three are backwards compatibility aliases for the previous three.
# Ultimately they should be deprecated. -exarkun
from twisted.spread.flavors import setCopierForClass
from twisted.spread.flavors import setFactoryForClass
from twisted.spread.flavors import setCopierForClassTree


MAX_BROKER_REFS = 1024

portno = 8787


class ProtocolError(Exception):
    """
    This error is raised when an invalid protocol statement is received.
    """


class DeadReferenceError(ProtocolError):
    """
    This error is raised when a method is called on a dead reference (one
    whose broker has been disconnected).
    """


class Error(Exception):
    """
    This error can be raised to generate known error conditions.  When a PB
    callable method (perspective_, remote_, view_) raises this error, it
    indicates that a traceback should not be printed, but instead, the
    string representation of the exception should be sent.
    """


class RemoteError(Exception):
    """
    This class is used to wrap a string-ified exception from the remote side
    to be able to reraise it. (Raising string exceptions is no longer possible
    in Python 2.6+)

    The value of this exception will be a str() representation of the remote
    value.

    @ivar remoteType: The full import path of the exception class which was
        raised on the remote end.
@type remoteType: C{str} @ivar remoteTraceback: The remote traceback. @type remoteTraceback: C{str} @note: It's not possible to include the remoteTraceback if this exception is thrown into a generator. It must be accessed as an attribute. """ def __init__(self, remoteType, value, remoteTraceback): Exception.__init__(self, value) self.remoteType = remoteType self.remoteTraceback = remoteTraceback class RemoteMethod: """ This is a translucent reference to a remote message. """ def __init__(self, obj, name): """ Initialize with a L{RemoteReference} and the name of this message. """ self.obj = obj self.name = name def __cmp__(self, other): return cmp((self.obj, self.name), other) def __hash__(self): return hash((self.obj, self.name)) def __call__(self, *args, **kw): """ Asynchronously invoke a remote method. """ return self.obj.broker._sendMessage('',self.obj.perspective, self.obj.luid, self.name, args, kw) class PBConnectionLost(Exception): pass class IPerspective(Interface): """ per*spec*tive, n. : The relationship of aspects of a subject to each other and to a whole: 'a perspective of history'; 'a need to view the problem in the proper perspective'. This is a Perspective Broker-specific wrapper for an avatar. That is to say, a PB-published view on to the business logic for the system's concept of a 'user'. The concept of attached/detached is no longer implemented by the framework. The realm is expected to implement such semantics if needed. """ def perspectiveMessageReceived(broker, message, args, kwargs): """ This method is called when a network message is received. @arg broker: The Perspective Broker. @type message: str @arg message: The name of the method called by the other end. @type args: list in jelly format @arg args: The arguments that were passed by the other end. It is recommend that you use the `unserialize' method of the broker to decode this. @type kwargs: dict in jelly format @arg kwargs: The keyword arguments that were passed by the other end. It is recommended that you use the `unserialize' method of the broker to decode this. @rtype: A jelly list. @return: It is recommended that you use the `serialize' method of the broker on whatever object you need to return to generate the return value. """ class Avatar: """ A default IPerspective implementor. This class is intended to be subclassed, and a realm should return an instance of such a subclass when IPerspective is requested of it. A peer requesting a perspective will receive only a L{RemoteReference} to a pb.Avatar. When a method is called on that L{RemoteReference}, it will translate to a method on the remote perspective named 'perspective_methodname'. (For more information on invoking methods on other objects, see L{flavors.ViewPoint}.) """ implements(IPerspective) def perspectiveMessageReceived(self, broker, message, args, kw): """ This method is called when a network message is received. This will call:: self.perspective_%(message)s(*broker.unserialize(args), **broker.unserialize(kw)) to handle the method; subclasses of Avatar are expected to implement methods using this naming convention. """ args = broker.unserialize(args, self) kw = broker.unserialize(kw, self) method = getattr(self, "perspective_%s" % message) try: state = method(*args, **kw) except TypeError: log.msg("%s didn't accept %s and %s" % (method, args, kw)) raise return broker.serialize(state, self, method, args, kw) class AsReferenceable(Referenceable): """ A reference directed towards another object. 
""" def __init__(self, object, messageType="remote"): self.remoteMessageReceived = getattr( object, messageType + "MessageReceived") class RemoteReference(Serializable, styles.Ephemeral): """ A translucent reference to a remote object. I may be a reference to a L{flavors.ViewPoint}, a L{flavors.Referenceable}, or an L{IPerspective} implementor (e.g., pb.Avatar). From the client's perspective, it is not possible to tell which except by convention. I am a \"translucent\" reference because although no additional bookkeeping overhead is given to the application programmer for manipulating a reference, return values are asynchronous. See also L{twisted.internet.defer}. @ivar broker: The broker I am obtained through. @type broker: L{Broker} """ implements(IUnjellyable) def __init__(self, perspective, broker, luid, doRefCount): """(internal) Initialize me with a broker and a locally-unique ID. The ID is unique only to the particular Perspective Broker instance. """ self.luid = luid self.broker = broker self.doRefCount = doRefCount self.perspective = perspective self.disconnectCallbacks = [] def notifyOnDisconnect(self, callback): """Register a callback to be called if our broker gets disconnected. This callback will be called with one argument, this instance. """ assert callable(callback) self.disconnectCallbacks.append(callback) if len(self.disconnectCallbacks) == 1: self.broker.notifyOnDisconnect(self._disconnected) def dontNotifyOnDisconnect(self, callback): """Remove a callback that was registered with notifyOnDisconnect.""" self.disconnectCallbacks.remove(callback) if not self.disconnectCallbacks: self.broker.dontNotifyOnDisconnect(self._disconnected) def _disconnected(self): """Called if we are disconnected and have callbacks registered.""" for callback in self.disconnectCallbacks: callback(self) self.disconnectCallbacks = None def jellyFor(self, jellier): """If I am being sent back to where I came from, serialize as a local backreference. """ if jellier.invoker: assert self.broker == jellier.invoker, "Can't send references to brokers other than their own." return "local", self.luid else: return "unpersistable", "References cannot be serialized" def unjellyFor(self, unjellier, unjellyList): self.__init__(unjellier.invoker.unserializingPerspective, unjellier.invoker, unjellyList[1], 1) return self def callRemote(self, _name, *args, **kw): """Asynchronously invoke a remote method. @type _name: C{str} @param _name: the name of the remote method to invoke @param args: arguments to serialize for the remote function @param kw: keyword arguments to serialize for the remote function. @rtype: L{twisted.internet.defer.Deferred} @returns: a Deferred which will be fired when the result of this remote call is received. """ # note that we use '_name' instead of 'name' so the user can call # remote methods with 'name' as a keyword parameter, like this: # ref.callRemote("getPeopleNamed", count=12, name="Bob") return self.broker._sendMessage('',self.perspective, self.luid, _name, args, kw) def remoteMethod(self, key): """Get a L{RemoteMethod} for this key. """ return RemoteMethod(self, key) def __cmp__(self,other): """Compare me [to another L{RemoteReference}]. """ if isinstance(other, RemoteReference): if other.broker == self.broker: return cmp(self.luid, other.luid) return cmp(self.broker, other) def __hash__(self): """Hash me. """ return self.luid def __del__(self): """Do distributed reference counting on finalization. 
""" if self.doRefCount: self.broker.sendDecRef(self.luid) setUnjellyableForClass("remote", RemoteReference) class Local: """(internal) A reference to a local object. """ def __init__(self, object, perspective=None): """Initialize. """ self.object = object self.perspective = perspective self.refcount = 1 def __repr__(self): return "<pb.Local %r ref:%s>" % (self.object, self.refcount) def incref(self): """Increment and return my reference count. """ self.refcount = self.refcount + 1 return self.refcount def decref(self): """Decrement and return my reference count. """ self.refcount = self.refcount - 1 return self.refcount ## # Failure ## class CopyableFailure(failure.Failure, Copyable): """ A L{flavors.RemoteCopy} and L{flavors.Copyable} version of L{twisted.python.failure.Failure} for serialization. """ unsafeTracebacks = 0 def getStateToCopy(self): """ Collect state related to the exception which occurred, discarding state which cannot reasonably be serialized. """ state = self.__dict__.copy() state['tb'] = None state['frames'] = [] state['stack'] = [] state['value'] = str(self.value) # Exception instance if isinstance(self.type, str): state['type'] = self.type else: state['type'] = reflect.qual(self.type) # Exception class if self.unsafeTracebacks: state['traceback'] = self.getTraceback() else: state['traceback'] = 'Traceback unavailable\n' return state class CopiedFailure(RemoteCopy, failure.Failure): """ A L{CopiedFailure} is a L{pb.RemoteCopy} of a L{failure.Failure} transfered via PB. @ivar type: The full import path of the exception class which was raised on the remote end. @type type: C{str} @ivar value: A str() representation of the remote value. @type value: L{CopiedFailure} or C{str} @ivar traceback: The remote traceback. @type traceback: C{str} """ def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'): if file is None: file = log.logfile file.write("Traceback from remote host -- ") file.write(self.traceback) file.write(self.type + ": " + self.value) file.write('\n') def throwExceptionIntoGenerator(self, g): """ Throw the original exception into the given generator, preserving traceback information if available. In the case of a L{CopiedFailure} where the exception type is a string, a L{pb.RemoteError} is thrown instead. @return: The next value yielded from the generator. @raise StopIteration: If there are no more values in the generator. @raise RemoteError: The wrapped remote exception. """ return g.throw(RemoteError(self.type, self.value, self.traceback)) printBriefTraceback = printTraceback printDetailedTraceback = printTraceback setUnjellyableForClass(CopyableFailure, CopiedFailure) def failure2Copyable(fail, unsafeTracebacks=0): f = types.InstanceType(CopyableFailure, fail.__dict__) f.unsafeTracebacks = unsafeTracebacks return f class Broker(banana.Banana): """I am a broker for objects. """ version = 6 username = None factory = None def __init__(self, isClient=1, security=globalSecurity): banana.Banana.__init__(self, isClient) self.disconnected = 0 self.disconnects = [] self.failures = [] self.connects = [] self.localObjects = {} self.security = security self.pageProducers = [] self.currentRequestID = 0 self.currentLocalID = 0 self.unserializingPerspective = None # Some terms: # PUID: process unique ID; return value of id() function. type "int". # LUID: locally unique ID; an ID unique to an object mapped over this # connection. type "int" # GUID: (not used yet) globally unique ID; an ID for an object which # may be on a redirected or meta server. 
Type as yet undecided. # Dictionary mapping LUIDs to local objects. # set above to allow root object to be assigned before connection is made # self.localObjects = {} # Dictionary mapping PUIDs to LUIDs. self.luids = {} # Dictionary mapping LUIDs to local (remotely cached) objects. Remotely # cached means that they're objects which originate here, and were # copied remotely. self.remotelyCachedObjects = {} # Dictionary mapping PUIDs to (cached) LUIDs self.remotelyCachedLUIDs = {} # Dictionary mapping (remote) LUIDs to (locally cached) objects. self.locallyCachedObjects = {} self.waitingForAnswers = {} # Mapping from LUIDs to weakref objects with callbacks for performing # any local cleanup which may be necessary for the corresponding # object once it no longer exists. self._localCleanup = {} def resumeProducing(self): """Called when the consumer attached to me runs out of buffer. """ # Go backwards over the list so we can remove indexes from it as we go for pageridx in xrange(len(self.pageProducers)-1, -1, -1): pager = self.pageProducers[pageridx] pager.sendNextPage() if not pager.stillPaging(): del self.pageProducers[pageridx] if not self.pageProducers: self.transport.unregisterProducer() # Streaming producer methods; not necessary to implement. def pauseProducing(self): pass def stopProducing(self): pass def registerPageProducer(self, pager): self.pageProducers.append(pager) if len(self.pageProducers) == 1: self.transport.registerProducer(self, 0) def expressionReceived(self, sexp): """Evaluate an expression as it's received. """ if isinstance(sexp, types.ListType): command = sexp[0] methodName = "proto_%s" % command method = getattr(self, methodName, None) if method: method(*sexp[1:]) else: self.sendCall("didNotUnderstand", command) else: raise ProtocolError("Non-list expression received.") def proto_version(self, vnum): """Protocol message: (version version-number) Check to make sure that both ends of the protocol are speaking the same version dialect. """ if vnum != self.version: raise ProtocolError("Version Incompatibility: %s %s" % (self.version, vnum)) def sendCall(self, *exp): """Utility method to send an expression to the other side of the connection. """ self.sendEncoded(exp) def proto_didNotUnderstand(self, command): """Respond to stock 'C{didNotUnderstand}' message. Log the command that was not understood and continue. (Note: this will probably be changed to close the connection or raise an exception in the future.) """ log.msg("Didn't understand command: %r" % command) def connectionReady(self): """Initialize. Called after Banana negotiation is done. """ self.sendCall("version", self.version) for notifier in self.connects: try: notifier() except: log.deferr() self.connects = None if self.factory: # in tests we won't have factory self.factory.clientConnectionMade(self) def connectionFailed(self): # XXX should never get called anymore? check! for notifier in self.failures: try: notifier() except: log.deferr() self.failures = None waitingForAnswers = None def connectionLost(self, reason): """The connection was lost. """ self.disconnected = 1 # nuke potential circular references. 
self.luids = None if self.waitingForAnswers: for d in self.waitingForAnswers.values(): try: d.errback(failure.Failure(PBConnectionLost(reason))) except: log.deferr() # Assure all Cacheable.stoppedObserving are called for lobj in self.remotelyCachedObjects.values(): cacheable = lobj.object perspective = lobj.perspective try: cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective)) except: log.deferr() # Loop on a copy to prevent notifiers to mixup # the list by calling dontNotifyOnDisconnect for notifier in self.disconnects[:]: try: notifier() except: log.deferr() self.disconnects = None self.waitingForAnswers = None self.localSecurity = None self.remoteSecurity = None self.remotelyCachedObjects = None self.remotelyCachedLUIDs = None self.locallyCachedObjects = None self.localObjects = None def notifyOnDisconnect(self, notifier): """Call the given callback when the Broker disconnects.""" assert callable(notifier) self.disconnects.append(notifier) def notifyOnFail(self, notifier): """Call the given callback if the Broker fails to connect.""" assert callable(notifier) self.failures.append(notifier) def notifyOnConnect(self, notifier): """Call the given callback when the Broker connects.""" assert callable(notifier) if self.connects is None: try: notifier() except: log.err() else: self.connects.append(notifier) def dontNotifyOnDisconnect(self, notifier): """Remove a callback from list of disconnect callbacks.""" try: self.disconnects.remove(notifier) except ValueError: pass def localObjectForID(self, luid): """ Get a local object for a locally unique ID. @return: An object previously stored with L{registerReference} or C{None} if there is no object which corresponds to the given identifier. """ lob = self.localObjects.get(luid) if lob is None: return return lob.object maxBrokerRefsViolations = 0 def registerReference(self, object): """Get an ID for a local object. Store a persistent reference to a local object and map its id() to a generated, session-unique ID and return that ID. """ assert object is not None puid = object.processUniqueID() luid = self.luids.get(puid) if luid is None: if len(self.localObjects) > MAX_BROKER_REFS: self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1 if self.maxBrokerRefsViolations > 3: self.transport.loseConnection() raise Error("Maximum PB reference count exceeded. " "Goodbye.") raise Error("Maximum PB reference count exceeded.") luid = self.newLocalID() self.localObjects[luid] = Local(object) self.luids[puid] = luid else: self.localObjects[luid].incref() return luid def setNameForLocal(self, name, object): """Store a special (string) ID for this object. This is how you specify a 'base' set of objects that the remote protocol can connect to. """ assert object is not None self.localObjects[name] = Local(object) def remoteForName(self, name): """Returns an object from the remote name mapping. Note that this does not check the validity of the name, only creates a translucent reference for it. """ return RemoteReference(None, self, name, 0) def cachedRemotelyAs(self, instance, incref=0): """Returns an ID that says what this instance is cached as remotely, or C{None} if it's not. """ puid = instance.processUniqueID() luid = self.remotelyCachedLUIDs.get(puid) if (luid is not None) and (incref): self.remotelyCachedObjects[luid].incref() return luid def remotelyCachedForLUID(self, luid): """Returns an instance which is cached remotely, with this LUID. 
""" return self.remotelyCachedObjects[luid].object def cacheRemotely(self, instance): """ XXX""" puid = instance.processUniqueID() luid = self.newLocalID() if len(self.remotelyCachedObjects) > MAX_BROKER_REFS: self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1 if self.maxBrokerRefsViolations > 3: self.transport.loseConnection() raise Error("Maximum PB cache count exceeded. " "Goodbye.") raise Error("Maximum PB cache count exceeded.") self.remotelyCachedLUIDs[puid] = luid # This table may not be necessary -- for now, it's to make sure that no # monkey business happens with id(instance) self.remotelyCachedObjects[luid] = Local(instance, self.serializingPerspective) return luid def cacheLocally(self, cid, instance): """(internal) Store a non-filled-out cached instance locally. """ self.locallyCachedObjects[cid] = instance def cachedLocallyAs(self, cid): instance = self.locallyCachedObjects[cid] return instance def serialize(self, object, perspective=None, method=None, args=None, kw=None): """Jelly an object according to the remote security rules for this broker. """ if isinstance(object, defer.Deferred): object.addCallbacks(self.serialize, lambda x: x, callbackKeywords={ 'perspective': perspective, 'method': method, 'args': args, 'kw': kw }) return object # XXX This call is NOT REENTRANT and testing for reentrancy is just # crazy, so it likely won't be. Don't ever write methods that call the # broker's serialize() method recursively (e.g. sending a method call # from within a getState (this causes concurrency problems anyway so # you really, really shouldn't do it)) # self.jellier = _NetJellier(self) self.serializingPerspective = perspective self.jellyMethod = method self.jellyArgs = args self.jellyKw = kw try: return jelly(object, self.security, None, self) finally: self.serializingPerspective = None self.jellyMethod = None self.jellyArgs = None self.jellyKw = None def unserialize(self, sexp, perspective = None): """Unjelly an sexp according to the local security rules for this broker. """ self.unserializingPerspective = perspective try: return unjelly(sexp, self.security, None, self) finally: self.unserializingPerspective = None def newLocalID(self): """Generate a new LUID. """ self.currentLocalID = self.currentLocalID + 1 return self.currentLocalID def newRequestID(self): """Generate a new request ID. """ self.currentRequestID = self.currentRequestID + 1 return self.currentRequestID def _sendMessage(self, prefix, perspective, objectID, message, args, kw): pbc = None pbe = None answerRequired = 1 if 'pbcallback' in kw: pbc = kw['pbcallback'] del kw['pbcallback'] if 'pberrback' in kw: pbe = kw['pberrback'] del kw['pberrback'] if 'pbanswer' in kw: assert (not pbe) and (not pbc), "You can't specify a no-answer requirement." answerRequired = kw['pbanswer'] del kw['pbanswer'] if self.disconnected: raise DeadReferenceError("Calling Stale Broker") try: netArgs = self.serialize(args, perspective=perspective, method=message) netKw = self.serialize(kw, perspective=perspective, method=message) except: return defer.fail(failure.Failure()) requestID = self.newRequestID() if answerRequired: rval = defer.Deferred() self.waitingForAnswers[requestID] = rval if pbc or pbe: log.msg('warning! 
using deprecated "pbcallback"') rval.addCallbacks(pbc, pbe) else: rval = None self.sendCall(prefix+"message", requestID, objectID, message, answerRequired, netArgs, netKw) return rval def proto_message(self, requestID, objectID, message, answerRequired, netArgs, netKw): self._recvMessage(self.localObjectForID, requestID, objectID, message, answerRequired, netArgs, netKw) def proto_cachemessage(self, requestID, objectID, message, answerRequired, netArgs, netKw): self._recvMessage(self.cachedLocallyAs, requestID, objectID, message, answerRequired, netArgs, netKw) def _recvMessage(self, findObjMethod, requestID, objectID, message, answerRequired, netArgs, netKw): """Received a message-send. Look up message based on object, unserialize the arguments, and invoke it with args, and send an 'answer' or 'error' response. """ try: object = findObjMethod(objectID) if object is None: raise Error("Invalid Object ID") netResult = object.remoteMessageReceived(self, message, netArgs, netKw) except Error, e: if answerRequired: # If the error is Jellyable or explicitly allowed via our # security options, send it back and let the code on the # other end deal with unjellying. If it isn't Jellyable, # wrap it in a CopyableFailure, which ensures it can be # unjellied on the other end. We have to do this because # all errors must be sent back. if isinstance(e, Jellyable) or self.security.isClassAllowed(e.__class__): self._sendError(e, requestID) else: self._sendError(CopyableFailure(e), requestID) except: if answerRequired: log.msg("Peer will receive following PB traceback:", isError=True) f = CopyableFailure() self._sendError(f, requestID) log.err() else: if answerRequired: if isinstance(netResult, defer.Deferred): args = (requestID,) netResult.addCallbacks(self._sendAnswer, self._sendFailureOrError, callbackArgs=args, errbackArgs=args) # XXX Should this be done somewhere else? else: self._sendAnswer(netResult, requestID) ## # success ## def _sendAnswer(self, netResult, requestID): """(internal) Send an answer to a previously sent message. """ self.sendCall("answer", requestID, netResult) def proto_answer(self, requestID, netResult): """(internal) Got an answer to a previously sent message. Look up the appropriate callback and call it. """ d = self.waitingForAnswers[requestID] del self.waitingForAnswers[requestID] d.callback(self.unserialize(netResult)) ## # failure ## def _sendFailureOrError(self, fail, requestID): """ Call L{_sendError} or L{_sendFailure}, depending on whether C{fail} represents an L{Error} subclass or not. """ if fail.check(Error) is None: self._sendFailure(fail, requestID) else: self._sendError(fail, requestID) def _sendFailure(self, fail, requestID): """Log error and then send it.""" log.msg("Peer will receive following PB traceback:") log.err(fail) self._sendError(fail, requestID) def _sendError(self, fail, requestID): """(internal) Send an error for a previously sent message. """ if isinstance(fail, failure.Failure): # If the failures value is jellyable or allowed through security, # send the value if (isinstance(fail.value, Jellyable) or self.security.isClassAllowed(fail.value.__class__)): fail = fail.value elif not isinstance(fail, CopyableFailure): fail = failure2Copyable(fail, self.factory.unsafeTracebacks) if isinstance(fail, CopyableFailure): fail.unsafeTracebacks = self.factory.unsafeTracebacks self.sendCall("error", requestID, self.serialize(fail)) def proto_error(self, requestID, fail): """(internal) Deal with an error. 
""" d = self.waitingForAnswers[requestID] del self.waitingForAnswers[requestID] d.errback(self.unserialize(fail)) ## # refcounts ## def sendDecRef(self, objectID): """(internal) Send a DECREF directive. """ self.sendCall("decref", objectID) def proto_decref(self, objectID): """(internal) Decrement the reference count of an object. If the reference count is zero, it will free the reference to this object. """ refs = self.localObjects[objectID].decref() if refs == 0: puid = self.localObjects[objectID].object.processUniqueID() del self.luids[puid] del self.localObjects[objectID] self._localCleanup.pop(puid, lambda: None)() ## # caching ## def decCacheRef(self, objectID): """(internal) Send a DECACHE directive. """ self.sendCall("decache", objectID) def proto_decache(self, objectID): """(internal) Decrement the reference count of a cached object. If the reference count is zero, free the reference, then send an 'uncached' directive. """ refs = self.remotelyCachedObjects[objectID].decref() # log.msg('decaching: %s #refs: %s' % (objectID, refs)) if refs == 0: lobj = self.remotelyCachedObjects[objectID] cacheable = lobj.object perspective = lobj.perspective # TODO: force_decache needs to be able to force-invalidate a # cacheable reference. try: cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective)) except: log.deferr() puid = cacheable.processUniqueID() del self.remotelyCachedLUIDs[puid] del self.remotelyCachedObjects[objectID] self.sendCall("uncache", objectID) def proto_uncache(self, objectID): """(internal) Tell the client it is now OK to uncache an object. """ # log.msg("uncaching locally %d" % objectID) obj = self.locallyCachedObjects[objectID] obj.broker = None ## def reallyDel(obj=obj): ## obj.__really_del__() ## obj.__del__ = reallyDel del self.locallyCachedObjects[objectID] def respond(challenge, password): """Respond to a challenge. This is useful for challenge/response authentication. """ m = md5() m.update(password) hashedPassword = m.digest() m = md5() m.update(hashedPassword) m.update(challenge) doubleHashedPassword = m.digest() return doubleHashedPassword def challenge(): """I return some random data.""" crap = '' for x in range(random.randrange(15,25)): crap = crap + chr(random.randint(65,90)) crap = md5(crap).digest() return crap class PBClientFactory(protocol.ClientFactory): """ Client factory for PB brokers. As with all client factories, use with reactor.connectTCP/SSL/etc.. getPerspective and getRootObject can be called either before or after the connect. """ protocol = Broker unsafeTracebacks = False def __init__(self, unsafeTracebacks=False, security=globalSecurity): """ @param unsafeTracebacks: if set, tracebacks for exceptions will be sent over the wire. @type unsafeTracebacks: C{bool} @param security: security options used by the broker, default to C{globalSecurity}. @type security: L{twisted.spread.jelly.SecurityOptions} """ self.unsafeTracebacks = unsafeTracebacks self.security = security self._reset() def buildProtocol(self, addr): """ Build the broker instance, passing the security options to it. 
""" p = self.protocol(isClient=True, security=self.security) p.factory = self return p def _reset(self): self.rootObjectRequests = [] # list of deferred self._broker = None self._root = None def _failAll(self, reason): deferreds = self.rootObjectRequests self._reset() for d in deferreds: d.errback(reason) def clientConnectionFailed(self, connector, reason): self._failAll(reason) def clientConnectionLost(self, connector, reason, reconnecting=0): """Reconnecting subclasses should call with reconnecting=1.""" if reconnecting: # any pending requests will go to next connection attempt # so we don't fail them. self._broker = None self._root = None else: self._failAll(reason) def clientConnectionMade(self, broker): self._broker = broker self._root = broker.remoteForName("root") ds = self.rootObjectRequests self.rootObjectRequests = [] for d in ds: d.callback(self._root) def getRootObject(self): """Get root object of remote PB server. @return: Deferred of the root object. """ if self._broker and not self._broker.disconnected: return defer.succeed(self._root) d = defer.Deferred() self.rootObjectRequests.append(d) return d def disconnect(self): """If the factory is connected, close the connection. Note that if you set up the factory to reconnect, you will need to implement extra logic to prevent automatic reconnection after this is called. """ if self._broker: self._broker.transport.loseConnection() def _cbSendUsername(self, root, username, password, client): return root.callRemote("login", username).addCallback( self._cbResponse, password, client) def _cbResponse(self, (challenge, challenger), password, client): return challenger.callRemote("respond", respond(challenge, password), client) def _cbLoginAnonymous(self, root, client): """ Attempt an anonymous login on the given remote root object. @type root: L{RemoteReference} @param root: The object on which to attempt the login, most likely returned by a call to L{PBClientFactory.getRootObject}. @param client: A jellyable object which will be used as the I{mind} parameter for the login attempt. @rtype: L{Deferred} @return: A L{Deferred} which will be called back with a L{RemoteReference} to an avatar when anonymous login succeeds, or which will errback if anonymous login fails. """ return root.callRemote("loginAnonymous", client) def login(self, credentials, client=None): """ Login and get perspective from remote PB server. Currently the following credentials are supported:: L{twisted.cred.credentials.IUsernamePassword} L{twisted.cred.credentials.IAnonymous} @rtype: L{Deferred} @return: A L{Deferred} which will be called back with a L{RemoteReference} for the avatar logged in to, or which will errback if login fails. """ d = self.getRootObject() if IAnonymous.providedBy(credentials): d.addCallback(self._cbLoginAnonymous, client) else: d.addCallback( self._cbSendUsername, credentials.username, credentials.password, client) return d class PBServerFactory(protocol.ServerFactory): """ Server factory for perspective broker. Login is done using a Portal object, whose realm is expected to return avatars implementing IPerspective. The credential checkers in the portal should accept IUsernameHashedPassword or IUsernameMD5Password. Alternatively, any object providing or adaptable to L{IPBRoot} can be used instead of a portal to provide the root object of the PB server. 
""" unsafeTracebacks = False # object broker factory protocol = Broker def __init__(self, root, unsafeTracebacks=False, security=globalSecurity): """ @param root: factory providing the root Referenceable used by the broker. @type root: object providing or adaptable to L{IPBRoot}. @param unsafeTracebacks: if set, tracebacks for exceptions will be sent over the wire. @type unsafeTracebacks: C{bool} @param security: security options used by the broker, default to C{globalSecurity}. @type security: L{twisted.spread.jelly.SecurityOptions} """ self.root = IPBRoot(root) self.unsafeTracebacks = unsafeTracebacks self.security = security def buildProtocol(self, addr): """ Return a Broker attached to the factory (as the service provider). """ proto = self.protocol(isClient=False, security=self.security) proto.factory = self proto.setNameForLocal("root", self.root.rootObject(proto)) return proto def clientConnectionMade(self, protocol): # XXX does this method make any sense? pass class IUsernameMD5Password(ICredentials): """ I encapsulate a username and a hashed password. This credential is used for username/password over PB. CredentialCheckers which check this kind of credential must store the passwords in plaintext form or as a MD5 digest. @type username: C{str} or C{Deferred} @ivar username: The username associated with these credentials. """ def checkPassword(password): """ Validate these credentials against the correct password. @type password: C{str} @param password: The correct, plaintext password against which to check. @rtype: C{bool} or L{Deferred} @return: C{True} if the credentials represented by this object match the given password, C{False} if they do not, or a L{Deferred} which will be called back with one of these values. """ def checkMD5Password(password): """ Validate these credentials against the correct MD5 digest of the password. @type password: C{str} @param password: The correct MD5 digest of a password against which to check. @rtype: C{bool} or L{Deferred} @return: C{True} if the credentials represented by this object match the given digest, C{False} if they do not, or a L{Deferred} which will be called back with one of these values. """ class _PortalRoot: """Root object, used to login to portal.""" implements(IPBRoot) def __init__(self, portal): self.portal = portal def rootObject(self, broker): return _PortalWrapper(self.portal, broker) registerAdapter(_PortalRoot, Portal, IPBRoot) class _JellyableAvatarMixin: """ Helper class for code which deals with avatars which PB must be capable of sending to a peer. """ def _cbLogin(self, (interface, avatar, logout)): """ Ensure that the avatar to be returned to the client is jellyable and set up disconnection notification to call the realm's logout object. """ if not IJellyable.providedBy(avatar): avatar = AsReferenceable(avatar, "perspective") puid = avatar.processUniqueID() # only call logout once, whether the connection is dropped (disconnect) # or a logout occurs (cleanup), and be careful to drop the reference to # it in either case logout = [ logout ] def maybeLogout(): if not logout: return fn = logout[0] del logout[0] fn() self.broker._localCleanup[puid] = maybeLogout self.broker.notifyOnDisconnect(maybeLogout) return avatar class _PortalWrapper(Referenceable, _JellyableAvatarMixin): """ Root Referenceable object, used to login to portal. """ def __init__(self, portal, broker): self.portal = portal self.broker = broker def remote_login(self, username): """ Start of username/password login. 
""" c = challenge() return c, _PortalAuthChallenger(self.portal, self.broker, username, c) def remote_loginAnonymous(self, mind): """ Attempt an anonymous login. @param mind: An object to use as the mind parameter to the portal login call (possibly None). @rtype: L{Deferred} @return: A Deferred which will be called back with an avatar when login succeeds or which will be errbacked if login fails somehow. """ d = self.portal.login(Anonymous(), mind, IPerspective) d.addCallback(self._cbLogin) return d class _PortalAuthChallenger(Referenceable, _JellyableAvatarMixin): """ Called with response to password challenge. """ implements(IUsernameHashedPassword, IUsernameMD5Password) def __init__(self, portal, broker, username, challenge): self.portal = portal self.broker = broker self.username = username self.challenge = challenge def remote_respond(self, response, mind): self.response = response d = self.portal.login(self, mind, IPerspective) d.addCallback(self._cbLogin) return d # IUsernameHashedPassword: def checkPassword(self, password): return self.checkMD5Password(md5(password).digest()) # IUsernameMD5Password def checkMD5Password(self, md5Password): md = md5() md.update(md5Password) md.update(self.challenge) correct = md.digest() return self.response == correct __all__ = [ # Everything from flavors is exposed publically here. 'IPBRoot', 'Serializable', 'Referenceable', 'NoSuchMethod', 'Root', 'ViewPoint', 'Viewable', 'Copyable', 'Jellyable', 'Cacheable', 'RemoteCopy', 'RemoteCache', 'RemoteCacheObserver', 'copyTags', 'setUnjellyableForClass', 'setUnjellyableFactoryForClass', 'setUnjellyableForClassTree', 'setCopierForClass', 'setFactoryForClass', 'setCopierForClassTree', 'MAX_BROKER_REFS', 'portno', 'ProtocolError', 'DeadReferenceError', 'Error', 'PBConnectionLost', 'RemoteMethod', 'IPerspective', 'Avatar', 'AsReferenceable', 'RemoteReference', 'CopyableFailure', 'CopiedFailure', 'failure2Copyable', 'Broker', 'respond', 'challenge', 'PBClientFactory', 'PBServerFactory', 'IUsernameMD5Password', ]
""" The service module for OpenBSD .. important:: If you feel that Salt should be using this module to manage services on a minion, and it is using a different module (or gives an error similar to *'service.start' is not available*), see :ref:`here <module-provider-override>`. """ import fnmatch import logging import os import re import salt.utils.data import salt.utils.files log = logging.getLogger(__name__) # XXX enable/disable support would be nice # Define the module's virtual name __virtualname__ = "service" __func_alias__ = {"reload_": "reload"} def __virtual__(): """ Only work on OpenBSD """ if __grains__["os"] == "OpenBSD" and os.path.exists("/etc/rc.d/rc.subr"): krel = list(list(map(int, __grains__["kernelrelease"].split(".")))) # The -f flag, used to force a script to run even if disabled, # was added after the 5.0 release. # the rcctl(8) command is the preferred way to manage services. if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0): if not os.path.exists("/usr/sbin/rcctl"): return __virtualname__ return ( False, "The openbsdservice execution module cannot be loaded: " "only available on OpenBSD systems.", ) def start(name): """ Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name> """ cmd = "/etc/rc.d/{} -f start".format(name) return not __salt__["cmd.retcode"](cmd) def stop(name): """ Stop the specified service CLI Example: .. code-block:: bash salt '*' service.stop <service name> """ cmd = "/etc/rc.d/{} -f stop".format(name) return not __salt__["cmd.retcode"](cmd) def restart(name): """ Restart the named service CLI Example: .. code-block:: bash salt '*' service.restart <service name> """ cmd = "/etc/rc.d/{} -f restart".format(name) return not __salt__["cmd.retcode"](cmd) def status(name, sig=None): """ Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Signature to use to find the service via ps Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> [service signature] """ if sig: return bool(__salt__["status.pid"](sig)) contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: cmd = "/etc/rc.d/{} -f check".format(service) results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True) if contains_globbing: return results return results[name] def reload_(name): """ .. versionadded:: 2014.7.0 Reload the named service CLI Example: .. code-block:: bash salt '*' service.reload <service name> """ cmd = "/etc/rc.d/{} -f reload".format(name) return not __salt__["cmd.retcode"](cmd) service_flags_regex = re.compile(r"^\s*(\w[\d\w]*)_flags=(?:(NO)|.*)$") pkg_scripts_regex = re.compile(r"^\s*pkg_scripts=\'(.*)\'$") start_daemon_call_regex = re.compile(r"(\s*start_daemon(?!\(\)))") start_daemon_parameter_regex = re.compile(r"(?:\s+(\w[\w\d]*))") def _get_rc(): """ Returns a dict where the key is the daemon's name and the value a boolean indicating its status (True: enabled or False: disabled). Check the daemons started by the system in /etc/rc and configured in /etc/rc.conf and /etc/rc.conf.local. Also add to the dict all the localy enabled daemons via $pkg_scripts. 
""" daemons_flags = {} try: # now read the system startup script /etc/rc # to know what are the system enabled daemons with salt.utils.files.fopen("/etc/rc", "r") as handle: lines = salt.utils.data.decode(handle.readlines()) except OSError: log.error("Unable to read /etc/rc") else: for line in lines: match = start_daemon_call_regex.match(line) if match: # the matched line is a call to start_daemon() # we remove the function name line = line[len(match.group(1)) :] # we retrieve each daemon name from the parameters of start_daemon() for daemon in start_daemon_parameter_regex.findall(line): # mark it as enabled daemons_flags[daemon] = True # this will execute rc.conf and rc.conf.local # used in /etc/rc at boot to start the daemons variables = __salt__["cmd.run"]( "(. /etc/rc.conf && set)", clean_env=True, output_loglevel="quiet", python_shell=True, ).split("\n") for var in variables: match = service_flags_regex.match(var) if match: # the matched var look like daemon_name_flags=, we test its assigned value # NO: disabled, everything else: enabled # do not create a new key if the service hasn't been found in /etc/rc, see $pkg_scripts if match.group(2) == "NO": daemons_flags[match.group(1)] = False else: match = pkg_scripts_regex.match(var) if match: # the matched var is pkg_scripts # we can retrieve the name of each localy enabled daemon that wasn't hand started via /etc/rc for daemon in match.group(1).split(): # create a new key and mark it as enabled daemons_flags[daemon] = True return daemons_flags def available(name): """ .. versionadded:: 2014.7.0 Returns ``True`` if the specified service is available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.available sshd """ path = "/etc/rc.d/{}".format(name) return os.path.isfile(path) and os.access(path, os.X_OK) def missing(name): """ .. versionadded:: 2014.7.0 The inverse of service.available. Returns ``True`` if the specified service is not available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.missing sshd """ return not available(name) def get_all(): """ .. versionadded:: 2014.7.0 Return all available boot services CLI Example: .. code-block:: bash salt '*' service.get_all """ services = [] if not os.path.isdir("/etc/rc.d"): return services for service in os.listdir("/etc/rc.d"): # this will remove rc.subr and all non executable files if available(service): services.append(service) return sorted(services) def get_enabled(): """ .. versionadded:: 2014.7.0 Return a list of service that are enabled on boot CLI Example: .. code-block:: bash salt '*' service.get_enabled """ services = [] for daemon, is_enabled in _get_rc().items(): if is_enabled: services.append(daemon) return sorted(set(get_all()) & set(services)) def enabled(name, **kwargs): """ .. versionadded:: 2014.7.0 Return True if the named service is enabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.enabled <service name> """ return name in get_enabled() def get_disabled(): """ .. versionadded:: 2014.7.0 Return a set of services that are installed but disabled CLI Example: .. code-block:: bash salt '*' service.get_disabled """ services = [] for daemon, is_enabled in _get_rc().items(): if not is_enabled: services.append(daemon) return sorted(set(get_all()) & set(services)) def disabled(name): """ .. versionadded:: 2014.7.0 Return True if the named service is disabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.disabled <service name> """ return name in get_disabled()
"""Selector event loop for Unix with signal handling.""" import errno import io import itertools import os import selectors import signal import socket import stat import subprocess import sys import threading import warnings from . import base_events from . import base_subprocess from . import constants from . import coroutines from . import events from . import exceptions from . import futures from . import selector_events from . import tasks from . import transports from .log import logger __all__ = ( 'SelectorEventLoop', 'AbstractChildWatcher', 'SafeChildWatcher', 'FastChildWatcher', 'MultiLoopChildWatcher', 'ThreadedChildWatcher', 'DefaultEventLoopPolicy', ) if sys.platform == 'win32': # pragma: no cover raise ImportError('Signals are not really supported on Windows') def _sighandler_noop(signum, frame): """Dummy signal handler.""" pass class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop): """Unix event loop. Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. """ def __init__(self, selector=None): super().__init__(selector) self._signal_handlers = {} def close(self): super().close() if not sys.is_finalizing(): for sig in list(self._signal_handlers): self.remove_signal_handler(sig) else: if self._signal_handlers: warnings.warn(f"Closing the loop {self!r} " f"on interpreter shutdown " f"stage, skipping signal handlers removal", ResourceWarning, source=self) self._signal_handlers.clear() def _process_self_data(self, data): for signum in data: if not signum: # ignore null bytes written by _write_to_self() continue self._handle_signal(signum) def add_signal_handler(self, sig, callback, *args): """Add a handler for a signal. UNIX only. Raise ValueError if the signal number is invalid or uncatchable. Raise RuntimeError if there is a problem setting up the handler. """ if (coroutines.iscoroutine(callback) or coroutines.iscoroutinefunction(callback)): raise TypeError("coroutines cannot be used " "with add_signal_handler()") self._check_signal(sig) self._check_closed() try: # set_wakeup_fd() raises ValueError if this is not the # main thread. By calling it early we ensure that an # event loop running in another thread cannot add a signal # handler. signal.set_wakeup_fd(self._csock.fileno()) except (ValueError, OSError) as exc: raise RuntimeError(str(exc)) handle = events.Handle(callback, args, self, None) self._signal_handlers[sig] = handle try: # Register a dummy signal handler to ask Python to write the signal # number in the wakup file descriptor. _process_self_data() will # read signal numbers from this file descriptor to handle signals. signal.signal(sig, _sighandler_noop) # Set SA_RESTART to limit EINTR occurrences. signal.siginterrupt(sig, False) except OSError as exc: del self._signal_handlers[sig] if not self._signal_handlers: try: signal.set_wakeup_fd(-1) except (ValueError, OSError) as nexc: logger.info('set_wakeup_fd(-1) failed: %s', nexc) if exc.errno == errno.EINVAL: raise RuntimeError(f'sig {sig} cannot be caught') else: raise def _handle_signal(self, sig): """Internal helper that is the actual signal handler.""" handle = self._signal_handlers.get(sig) if handle is None: return # Assume it's some race condition. if handle._cancelled: self.remove_signal_handler(sig) # Remove it properly. else: self._add_callback_signalsafe(handle) def remove_signal_handler(self, sig): """Remove a handler for a signal. UNIX only. Return True if a signal handler was removed, False if not. 
""" self._check_signal(sig) try: del self._signal_handlers[sig] except KeyError: return False if sig == signal.SIGINT: handler = signal.default_int_handler else: handler = signal.SIG_DFL try: signal.signal(sig, handler) except OSError as exc: if exc.errno == errno.EINVAL: raise RuntimeError(f'sig {sig} cannot be caught') else: raise if not self._signal_handlers: try: signal.set_wakeup_fd(-1) except (ValueError, OSError) as exc: logger.info('set_wakeup_fd(-1) failed: %s', exc) return True def _check_signal(self, sig): """Internal helper to validate a signal. Raise ValueError if the signal number is invalid or uncatchable. Raise RuntimeError if there is a problem setting up the handler. """ if not isinstance(sig, int): raise TypeError(f'sig must be an int, not {sig!r}') if sig not in signal.valid_signals(): raise ValueError(f'invalid signal number {sig}') def _make_read_pipe_transport(self, pipe, protocol, waiter=None, extra=None): return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra) def _make_write_pipe_transport(self, pipe, protocol, waiter=None, extra=None): return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra) async def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): with events.get_child_watcher() as watcher: if not watcher.is_active(): # Check early. # Raising exception before process creation # prevents subprocess execution if the watcher # is not ready to handle it. raise RuntimeError("asyncio.get_child_watcher() is not activated, " "subprocess support is not installed.") waiter = self.create_future() transp = _UnixSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, **kwargs) watcher.add_child_handler(transp.get_pid(), self._child_watcher_callback, transp) try: await waiter except (SystemExit, KeyboardInterrupt): raise except BaseException: transp.close() await transp._wait() raise return transp def _child_watcher_callback(self, pid, returncode, transp): self.call_soon_threadsafe(transp._process_exited, returncode) async def create_unix_connection( self, protocol_factory, path=None, *, ssl=None, sock=None, server_hostname=None, ssl_handshake_timeout=None): assert server_hostname is None or isinstance(server_hostname, str) if ssl: if server_hostname is None: raise ValueError( 'you have to pass server_hostname when using ssl') else: if server_hostname is not None: raise ValueError('server_hostname is only meaningful with ssl') if ssl_handshake_timeout is not None: raise ValueError( 'ssl_handshake_timeout is only meaningful with ssl') if path is not None: if sock is not None: raise ValueError( 'path and sock can not be specified at the same time') path = os.fspath(path) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) try: sock.setblocking(False) await self.sock_connect(sock, path) except: sock.close() raise else: if sock is None: raise ValueError('no path and sock were specified') if (sock.family != socket.AF_UNIX or sock.type != socket.SOCK_STREAM): raise ValueError( f'A UNIX Domain Stream Socket was expected, got {sock!r}') sock.setblocking(False) transport, protocol = await self._create_connection_transport( sock, protocol_factory, ssl, server_hostname, ssl_handshake_timeout=ssl_handshake_timeout) return transport, protocol async def create_unix_server( self, protocol_factory, path=None, *, sock=None, backlog=100, ssl=None, ssl_handshake_timeout=None, start_serving=True): if isinstance(ssl, bool): raise TypeError('ssl argument 
must be an SSLContext or None') if ssl_handshake_timeout is not None and not ssl: raise ValueError( 'ssl_handshake_timeout is only meaningful with ssl') if path is not None: if sock is not None: raise ValueError( 'path and sock can not be specified at the same time') path = os.fspath(path) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) # Check for abstract socket. `str` and `bytes` paths are supported. if path[0] not in (0, '\x00'): try: if stat.S_ISSOCK(os.stat(path).st_mode): os.remove(path) except FileNotFoundError: pass except OSError as err: # Directory may have permissions only to create socket. logger.error('Unable to check or remove stale UNIX socket ' '%r: %r', path, err) try: sock.bind(path) except OSError as exc: sock.close() if exc.errno == errno.EADDRINUSE: # Let's improve the error message by adding # with what exact address it occurs. msg = f'Address {path!r} is already in use' raise OSError(errno.EADDRINUSE, msg) from None else: raise except: sock.close() raise else: if sock is None: raise ValueError( 'path was not specified, and no sock specified') if (sock.family != socket.AF_UNIX or sock.type != socket.SOCK_STREAM): raise ValueError( f'A UNIX Domain Stream Socket was expected, got {sock!r}') sock.setblocking(False) server = base_events.Server(self, [sock], protocol_factory, ssl, backlog, ssl_handshake_timeout) if start_serving: server._start_serving() # Skip one loop iteration so that all 'loop.add_reader' # go through. await tasks.sleep(0, loop=self) return server async def _sock_sendfile_native(self, sock, file, offset, count): try: os.sendfile except AttributeError as exc: raise exceptions.SendfileNotAvailableError( "os.sendfile() is not available") try: fileno = file.fileno() except (AttributeError, io.UnsupportedOperation) as err: raise exceptions.SendfileNotAvailableError("not a regular file") try: fsize = os.fstat(fileno).st_size except OSError as err: raise exceptions.SendfileNotAvailableError("not a regular file") blocksize = count if count else fsize if not blocksize: return 0 # empty file fut = self.create_future() self._sock_sendfile_native_impl(fut, None, sock, fileno, offset, count, blocksize, 0) return await fut def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno, offset, count, blocksize, total_sent): fd = sock.fileno() if registered_fd is not None: # Remove the callback early. It should be rare that the # selector says the fd is ready but the call still returns # EAGAIN, and I am willing to take a hit in that case in # order to simplify the common case. self.remove_writer(registered_fd) if fut.cancelled(): self._sock_sendfile_update_filepos(fileno, offset, total_sent) return if count: blocksize = count - total_sent if blocksize <= 0: self._sock_sendfile_update_filepos(fileno, offset, total_sent) fut.set_result(total_sent) return try: sent = os.sendfile(fd, fileno, offset, blocksize) except (BlockingIOError, InterruptedError): if registered_fd is None: self._sock_add_cancellation_callback(fut, sock) self.add_writer(fd, self._sock_sendfile_native_impl, fut, fd, sock, fileno, offset, count, blocksize, total_sent) except OSError as exc: if (registered_fd is not None and exc.errno == errno.ENOTCONN and type(exc) is not ConnectionError): # If we have an ENOTCONN and this isn't a first call to # sendfile(), i.e. the connection was closed in the middle # of the operation, normalize the error to ConnectionError # to make it consistent across all Posix systems. 
new_exc = ConnectionError( "socket is not connected", errno.ENOTCONN) new_exc.__cause__ = exc exc = new_exc if total_sent == 0: # We can get here for different reasons, the main # one being 'file' is not a regular mmap(2)-like # file, in which case we'll fall back on using # plain send(). err = exceptions.SendfileNotAvailableError( "os.sendfile call failed") self._sock_sendfile_update_filepos(fileno, offset, total_sent) fut.set_exception(err) else: self._sock_sendfile_update_filepos(fileno, offset, total_sent) fut.set_exception(exc) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._sock_sendfile_update_filepos(fileno, offset, total_sent) fut.set_exception(exc) else: if sent == 0: # EOF self._sock_sendfile_update_filepos(fileno, offset, total_sent) fut.set_result(total_sent) else: offset += sent total_sent += sent if registered_fd is None: self._sock_add_cancellation_callback(fut, sock) self.add_writer(fd, self._sock_sendfile_native_impl, fut, fd, sock, fileno, offset, count, blocksize, total_sent) def _sock_sendfile_update_filepos(self, fileno, offset, total_sent): if total_sent > 0: os.lseek(fileno, offset, os.SEEK_SET) def _sock_add_cancellation_callback(self, fut, sock): def cb(fut): if fut.cancelled(): fd = sock.fileno() if fd != -1: self.remove_writer(fd) fut.add_done_callback(cb) class _UnixReadPipeTransport(transports.ReadTransport): max_size = 256 * 1024 # max bytes we read in one event loop iteration def __init__(self, loop, pipe, protocol, waiter=None, extra=None): super().__init__(extra) self._extra['pipe'] = pipe self._loop = loop self._pipe = pipe self._fileno = pipe.fileno() self._protocol = protocol self._closing = False self._paused = False mode = os.fstat(self._fileno).st_mode if not (stat.S_ISFIFO(mode) or stat.S_ISSOCK(mode) or stat.S_ISCHR(mode)): self._pipe = None self._fileno = None self._protocol = None raise ValueError("Pipe transport is for pipes/sockets only.") os.set_blocking(self._fileno, False) self._loop.call_soon(self._protocol.connection_made, self) # only start reading when connection_made() has been called self._loop.call_soon(self._loop._add_reader, self._fileno, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called self._loop.call_soon(futures._set_result_unless_cancelled, waiter, None) def __repr__(self): info = [self.__class__.__name__] if self._pipe is None: info.append('closed') elif self._closing: info.append('closing') info.append(f'fd={self._fileno}') selector = getattr(self._loop, '_selector', None) if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( selector, self._fileno, selectors.EVENT_READ) if polling: info.append('polling') else: info.append('idle') elif self._pipe is not None: info.append('open') else: info.append('closed') return '<{}>'.format(' '.join(info)) def _read_ready(self): try: data = os.read(self._fileno, self.max_size) except (BlockingIOError, InterruptedError): pass except OSError as exc: self._fatal_error(exc, 'Fatal read error on pipe transport') else: if data: self._protocol.data_received(data) else: if self._loop.get_debug(): logger.info("%r was closed by peer", self) self._closing = True self._loop._remove_reader(self._fileno) self._loop.call_soon(self._protocol.eof_received) self._loop.call_soon(self._call_connection_lost, None) def pause_reading(self): if self._closing or self._paused: return self._paused = True self._loop._remove_reader(self._fileno) if self._loop.get_debug(): 
logger.debug("%r pauses reading", self) def resume_reading(self): if self._closing or not self._paused: return self._paused = False self._loop._add_reader(self._fileno, self._read_ready) if self._loop.get_debug(): logger.debug("%r resumes reading", self) def set_protocol(self, protocol): self._protocol = protocol def get_protocol(self): return self._protocol def is_closing(self): return self._closing def close(self): if not self._closing: self._close(None) def __del__(self, _warn=warnings.warn): if self._pipe is not None: _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) self._pipe.close() def _fatal_error(self, exc, message='Fatal error on pipe transport'): # should be called by exception handler only if (isinstance(exc, OSError) and exc.errno == errno.EIO): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: self._loop.call_exception_handler({ 'message': message, 'exception': exc, 'transport': self, 'protocol': self._protocol, }) self._close(exc) def _close(self, exc): self._closing = True self._loop._remove_reader(self._fileno) self._loop.call_soon(self._call_connection_lost, exc) def _call_connection_lost(self, exc): try: self._protocol.connection_lost(exc) finally: self._pipe.close() self._pipe = None self._protocol = None self._loop = None class _UnixWritePipeTransport(transports._FlowControlMixin, transports.WriteTransport): def __init__(self, loop, pipe, protocol, waiter=None, extra=None): super().__init__(extra, loop) self._extra['pipe'] = pipe self._pipe = pipe self._fileno = pipe.fileno() self._protocol = protocol self._buffer = bytearray() self._conn_lost = 0 self._closing = False # Set when close() or write_eof() called. mode = os.fstat(self._fileno).st_mode is_char = stat.S_ISCHR(mode) is_fifo = stat.S_ISFIFO(mode) is_socket = stat.S_ISSOCK(mode) if not (is_char or is_fifo or is_socket): self._pipe = None self._fileno = None self._protocol = None raise ValueError("Pipe transport is only for " "pipes, sockets and character devices") os.set_blocking(self._fileno, False) self._loop.call_soon(self._protocol.connection_made, self) # On AIX, the reader trick (to be notified when the read end of the # socket is closed) only works for sockets. On other platforms it # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.) if is_socket or (is_fifo and not sys.platform.startswith("aix")): # only start reading when connection_made() has been called self._loop.call_soon(self._loop._add_reader, self._fileno, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called self._loop.call_soon(futures._set_result_unless_cancelled, waiter, None) def __repr__(self): info = [self.__class__.__name__] if self._pipe is None: info.append('closed') elif self._closing: info.append('closing') info.append(f'fd={self._fileno}') selector = getattr(self._loop, '_selector', None) if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( selector, self._fileno, selectors.EVENT_WRITE) if polling: info.append('polling') else: info.append('idle') bufsize = self.get_write_buffer_size() info.append(f'bufsize={bufsize}') elif self._pipe is not None: info.append('open') else: info.append('closed') return '<{}>'.format(' '.join(info)) def get_write_buffer_size(self): return len(self._buffer) def _read_ready(self): # Pipe was closed by peer. 
if self._loop.get_debug(): logger.info("%r was closed by peer", self) if self._buffer: self._close(BrokenPipeError()) else: self._close() def write(self, data): assert isinstance(data, (bytes, bytearray, memoryview)), repr(data) if isinstance(data, bytearray): data = memoryview(data) if not data: return if self._conn_lost or self._closing: if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: logger.warning('pipe closed by peer or ' 'os.write(pipe, data) raised exception.') self._conn_lost += 1 return if not self._buffer: # Attempt to send it right away first. try: n = os.write(self._fileno, data) except (BlockingIOError, InterruptedError): n = 0 except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._conn_lost += 1 self._fatal_error(exc, 'Fatal write error on pipe transport') return if n == len(data): return elif n > 0: data = memoryview(data)[n:] self._loop._add_writer(self._fileno, self._write_ready) self._buffer += data self._maybe_pause_protocol() def _write_ready(self): assert self._buffer, 'Data should not be empty' try: n = os.write(self._fileno, self._buffer) except (BlockingIOError, InterruptedError): pass except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._buffer.clear() self._conn_lost += 1 # Remove writer here, _fatal_error() doesn't it # because _buffer is empty. self._loop._remove_writer(self._fileno) self._fatal_error(exc, 'Fatal write error on pipe transport') else: if n == len(self._buffer): self._buffer.clear() self._loop._remove_writer(self._fileno) self._maybe_resume_protocol() # May append to buffer. if self._closing: self._loop._remove_reader(self._fileno) self._call_connection_lost(None) return elif n > 0: del self._buffer[:n] def can_write_eof(self): return True def write_eof(self): if self._closing: return assert self._pipe self._closing = True if not self._buffer: self._loop._remove_reader(self._fileno) self._loop.call_soon(self._call_connection_lost, None) def set_protocol(self, protocol): self._protocol = protocol def get_protocol(self): return self._protocol def is_closing(self): return self._closing def close(self): if self._pipe is not None and not self._closing: # write_eof is all what we needed to close the write pipe self.write_eof() def __del__(self, _warn=warnings.warn): if self._pipe is not None: _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) self._pipe.close() def abort(self): self._close(None) def _fatal_error(self, exc, message='Fatal error on pipe transport'): # should be called by exception handler only if isinstance(exc, OSError): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: self._loop.call_exception_handler({ 'message': message, 'exception': exc, 'transport': self, 'protocol': self._protocol, }) self._close(exc) def _close(self, exc=None): self._closing = True if self._buffer: self._loop._remove_writer(self._fileno) self._buffer.clear() self._loop._remove_reader(self._fileno) self._loop.call_soon(self._call_connection_lost, exc) def _call_connection_lost(self, exc): try: self._protocol.connection_lost(exc) finally: self._pipe.close() self._pipe = None self._protocol = None self._loop = None class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport): def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): stdin_w = None if stdin == subprocess.PIPE: # Use a socket pair for stdin, since not all platforms # support selecting read events on the write end of a # socket (which we use in order to 
detect closing of the # other end). Notably this is needed on AIX, and works # just fine on other platforms. stdin, stdin_w = socket.socketpair() try: self._proc = subprocess.Popen( args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, universal_newlines=False, bufsize=bufsize, **kwargs) if stdin_w is not None: stdin.close() self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize) stdin_w = None finally: if stdin_w is not None: stdin.close() stdin_w.close() class AbstractChildWatcher: """Abstract base class for monitoring child processes. Objects derived from this class monitor a collection of subprocesses and report their termination or interruption by a signal. New callbacks are registered with .add_child_handler(). Starting a new process must be done within a 'with' block to allow the watcher to suspend its activity until the new process if fully registered (this is needed to prevent a race condition in some implementations). Example: with watcher: proc = subprocess.Popen("sleep 1") watcher.add_child_handler(proc.pid, callback) Notes: Implementations of this class must be thread-safe. Since child watcher objects may catch the SIGCHLD signal and call waitpid(-1), there should be only one active object per process. """ def add_child_handler(self, pid, callback, *args): """Register a new child handler. Arrange for callback(pid, returncode, *args) to be called when process 'pid' terminates. Specifying another callback for the same process replaces the previous handler. Note: callback() must be thread-safe. """ raise NotImplementedError() def remove_child_handler(self, pid): """Removes the handler for process 'pid'. The function returns True if the handler was successfully removed, False if there was nothing to remove.""" raise NotImplementedError() def attach_loop(self, loop): """Attach the watcher to an event loop. If the watcher was previously attached to an event loop, then it is first detached before attaching to the new loop. Note: loop may be None. """ raise NotImplementedError() def close(self): """Close the watcher. This must be called to make sure that any underlying resource is freed. """ raise NotImplementedError() def is_active(self): """Return ``True`` if the watcher is active and is used by the event loop. Return True if the watcher is installed and ready to handle process exit notifications. """ raise NotImplementedError() def __enter__(self): """Enter the watcher's context and allow starting new processes This function must return self""" raise NotImplementedError() def __exit__(self, a, b, c): """Exit the watcher's context""" raise NotImplementedError() def _compute_returncode(status): if os.WIFSIGNALED(status): # The child process died because of a signal. return -os.WTERMSIG(status) elif os.WIFEXITED(status): # The child process exited (e.g sys.exit()). return os.WEXITSTATUS(status) else: # The child exited, but we don't understand its status. # This shouldn't happen, but if it does, let's just # return that status; perhaps that helps debug it. 
return status class BaseChildWatcher(AbstractChildWatcher): def __init__(self): self._loop = None self._callbacks = {} def close(self): self.attach_loop(None) def is_active(self): return self._loop is not None and self._loop.is_running() def _do_waitpid(self, expected_pid): raise NotImplementedError() def _do_waitpid_all(self): raise NotImplementedError() def attach_loop(self, loop): assert loop is None or isinstance(loop, events.AbstractEventLoop) if self._loop is not None and loop is None and self._callbacks: warnings.warn( 'A loop is being detached ' 'from a child watcher with pending handlers', RuntimeWarning) if self._loop is not None: self._loop.remove_signal_handler(signal.SIGCHLD) self._loop = loop if loop is not None: loop.add_signal_handler(signal.SIGCHLD, self._sig_chld) # Prevent a race condition in case a child terminated # during the switch. self._do_waitpid_all() def _sig_chld(self): try: self._do_waitpid_all() except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: # self._loop should always be available here # as '_sig_chld' is added as a signal handler # in 'attach_loop' self._loop.call_exception_handler({ 'message': 'Unknown exception in SIGCHLD handler', 'exception': exc, }) class SafeChildWatcher(BaseChildWatcher): """'Safe' child watcher implementation. This implementation avoids disrupting other code spawning processes by polling explicitly each process in the SIGCHLD handler instead of calling os.waitpid(-1). This is a safe solution but it has a significant overhead when handling a big number of children (O(n) each time SIGCHLD is raised) """ def close(self): self._callbacks.clear() super().close() def __enter__(self): return self def __exit__(self, a, b, c): pass def add_child_handler(self, pid, callback, *args): self._callbacks[pid] = (callback, args) # Prevent a race condition in case the child is already terminated. self._do_waitpid(pid) def remove_child_handler(self, pid): try: del self._callbacks[pid] return True except KeyError: return False def _do_waitpid_all(self): for pid in list(self._callbacks): self._do_waitpid(pid) def _do_waitpid(self, expected_pid): assert expected_pid > 0 try: pid, status = os.waitpid(expected_pid, os.WNOHANG) except ChildProcessError: # The child process is already reaped # (may happen if waitpid() is called elsewhere). pid = expected_pid returncode = 255 logger.warning( "Unknown child process pid %d, will report returncode 255", pid) else: if pid == 0: # The child process is still alive. return returncode = _compute_returncode(status) if self._loop.get_debug(): logger.debug('process %s exited with returncode %s', expected_pid, returncode) try: callback, args = self._callbacks.pop(pid) except KeyError: # pragma: no cover # May happen if .remove_child_handler() is called # after os.waitpid() returns. if self._loop.get_debug(): logger.warning("Child watcher got an unexpected pid: %r", pid, exc_info=True) else: callback(pid, returncode, *args) class FastChildWatcher(BaseChildWatcher): """'Fast' child watcher implementation. This implementation reaps every terminated processes by calling os.waitpid(-1) directly, possibly breaking other code spawning processes and waiting for their termination. There is no noticeable overhead when handling a big number of children (O(1) each time a child terminates). 
""" def __init__(self): super().__init__() self._lock = threading.Lock() self._zombies = {} self._forks = 0 def close(self): self._callbacks.clear() self._zombies.clear() super().close() def __enter__(self): with self._lock: self._forks += 1 return self def __exit__(self, a, b, c): with self._lock: self._forks -= 1 if self._forks or not self._zombies: return collateral_victims = str(self._zombies) self._zombies.clear() logger.warning( "Caught subprocesses termination from unknown pids: %s", collateral_victims) def add_child_handler(self, pid, callback, *args): assert self._forks, "Must use the context manager" with self._lock: try: returncode = self._zombies.pop(pid) except KeyError: # The child is running. self._callbacks[pid] = callback, args return # The child is dead already. We can fire the callback. callback(pid, returncode, *args) def remove_child_handler(self, pid): try: del self._callbacks[pid] return True except KeyError: return False def _do_waitpid_all(self): # Because of signal coalescing, we must keep calling waitpid() as # long as we're able to reap a child. while True: try: pid, status = os.waitpid(-1, os.WNOHANG) except ChildProcessError: # No more child processes exist. return else: if pid == 0: # A child process is still alive. return returncode = _compute_returncode(status) with self._lock: try: callback, args = self._callbacks.pop(pid) except KeyError: # unknown child if self._forks: # It may not be registered yet. self._zombies[pid] = returncode if self._loop.get_debug(): logger.debug('unknown process %s exited ' 'with returncode %s', pid, returncode) continue callback = None else: if self._loop.get_debug(): logger.debug('process %s exited with returncode %s', pid, returncode) if callback is None: logger.warning( "Caught subprocess termination from unknown pid: " "%d -> %d", pid, returncode) else: callback(pid, returncode, *args) class MultiLoopChildWatcher(AbstractChildWatcher): """A watcher that doesn't require running loop in the main thread. This implementation registers a SIGCHLD signal handler on instantiation (which may conflict with other code that install own handler for this signal). The solution is safe but it has a significant overhead when handling a big number of processes (*O(n)* each time a SIGCHLD is received). """ # Implementation note: # The class keeps compatibility with AbstractChildWatcher ABC # To achieve this it has empty attach_loop() method # and doesn't accept explicit loop argument # for add_child_handler()/remove_child_handler() # but retrieves the current loop by get_running_loop() def __init__(self): self._callbacks = {} self._saved_sighandler = None def is_active(self): return self._saved_sighandler is not None def close(self): self._callbacks.clear() if self._saved_sighandler is not None: handler = signal.getsignal(signal.SIGCHLD) if handler != self._sig_chld: logger.warning("SIGCHLD handler was changed by outside code") else: signal.signal(signal.SIGCHLD, self._saved_sighandler) self._saved_sighandler = None def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def add_child_handler(self, pid, callback, *args): loop = events.get_running_loop() self._callbacks[pid] = (loop, callback, args) # Prevent a race condition in case the child is already terminated. 
self._do_waitpid(pid) def remove_child_handler(self, pid): try: del self._callbacks[pid] return True except KeyError: return False def attach_loop(self, loop): # Don't save the loop but initialize itself if called first time # The reason to do it here is that attach_loop() is called from # unix policy only for the main thread. # Main thread is required for subscription on SIGCHLD signal if self._saved_sighandler is None: self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld) if self._saved_sighandler is None: logger.warning("Previous SIGCHLD handler was set by non-Python code, " "restore to default handler on watcher close.") self._saved_sighandler = signal.SIG_DFL # Set SA_RESTART to limit EINTR occurrences. signal.siginterrupt(signal.SIGCHLD, False) def _do_waitpid_all(self): for pid in list(self._callbacks): self._do_waitpid(pid) def _do_waitpid(self, expected_pid): assert expected_pid > 0 try: pid, status = os.waitpid(expected_pid, os.WNOHANG) except ChildProcessError: # The child process is already reaped # (may happen if waitpid() is called elsewhere). pid = expected_pid returncode = 255 logger.warning( "Unknown child process pid %d, will report returncode 255", pid) debug_log = False else: if pid == 0: # The child process is still alive. return returncode = _compute_returncode(status) debug_log = True try: loop, callback, args = self._callbacks.pop(pid) except KeyError: # pragma: no cover # May happen if .remove_child_handler() is called # after os.waitpid() returns. logger.warning("Child watcher got an unexpected pid: %r", pid, exc_info=True) else: if loop.is_closed(): logger.warning("Loop %r that handles pid %r is closed", loop, pid) else: if debug_log and loop.get_debug(): logger.debug('process %s exited with returncode %s', expected_pid, returncode) loop.call_soon_threadsafe(callback, pid, returncode, *args) def _sig_chld(self, signum, frame): try: self._do_waitpid_all() except (SystemExit, KeyboardInterrupt): raise except BaseException: logger.warning('Unknown exception in SIGCHLD handler', exc_info=True) class ThreadedChildWatcher(AbstractChildWatcher): """Threaded child watcher implementation. The watcher uses a thread per process for waiting for the process finish. It doesn't require subscription on POSIX signal but a thread creation is not free. The watcher has O(1) complexity, its performance doesn't depend on amount of spawn processes. """ def __init__(self): self._pid_counter = itertools.count(0) self._threads = {} def is_active(self): return True def close(self): pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def __del__(self, _warn=warnings.warn): threads = [thread for thread in list(self._threads.values()) if thread.is_alive()] if threads: _warn(f"{self.__class__} has registered but not finished child processes", ResourceWarning, source=self) def add_child_handler(self, pid, callback, *args): loop = events.get_running_loop() thread = threading.Thread(target=self._do_waitpid, name=f"waitpid-{next(self._pid_counter)}", args=(loop, pid, callback, args), daemon=True) self._threads[pid] = thread thread.start() def remove_child_handler(self, pid): # asyncio never calls remove_child_handler() !!! 
# The method is no-op but is implemented because # abstract base classe requires it return True def attach_loop(self, loop): pass def _do_waitpid(self, loop, expected_pid, callback, args): assert expected_pid > 0 try: pid, status = os.waitpid(expected_pid, 0) except ChildProcessError: # The child process is already reaped # (may happen if waitpid() is called elsewhere). pid = expected_pid returncode = 255 logger.warning( "Unknown child process pid %d, will report returncode 255", pid) else: returncode = _compute_returncode(status) if loop.get_debug(): logger.debug('process %s exited with returncode %s', expected_pid, returncode) if loop.is_closed(): logger.warning("Loop %r that handles pid %r is closed", loop, pid) else: loop.call_soon_threadsafe(callback, pid, returncode, *args) self._threads.pop(expected_pid) class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): """UNIX event loop policy with a watcher for child processes.""" _loop_factory = _UnixSelectorEventLoop def __init__(self): super().__init__() self._watcher = None def _init_watcher(self): with events._lock: if self._watcher is None: # pragma: no branch self._watcher = ThreadedChildWatcher() if isinstance(threading.current_thread(), threading._MainThread): self._watcher.attach_loop(self._local._loop) def set_event_loop(self, loop): """Set the event loop. As a side effect, if a child watcher was set before, then calling .set_event_loop() from the main thread will call .attach_loop(loop) on the child watcher. """ super().set_event_loop(loop) if (self._watcher is not None and isinstance(threading.current_thread(), threading._MainThread)): self._watcher.attach_loop(loop) def get_child_watcher(self): """Get the watcher for child processes. If not yet set, a ThreadedChildWatcher object is automatically created. """ if self._watcher is None: self._init_watcher() return self._watcher def set_child_watcher(self, watcher): """Set the watcher for child processes.""" assert watcher is None or isinstance(watcher, AbstractChildWatcher) if self._watcher is not None: self._watcher.close() self._watcher = watcher SelectorEventLoop = _UnixSelectorEventLoop DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
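# A minimal usage sketch (not part of the module above): serving an echo
# protocol over a UNIX domain socket with the selector event loop defined
# here. The socket path and the EchoProtocol class are illustrative
# assumptions, not part of the unix_events implementation itself.
import asyncio


class EchoProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        # Echo whatever the peer sent straight back.
        self.transport.write(data)


async def _echo_main(path='/tmp/echo.sock'):
    loop = asyncio.get_running_loop()
    # create_unix_server() is the coroutine implemented above: it binds the
    # AF_UNIX socket, removes a stale socket file if needed, and (by default)
    # starts serving immediately.
    server = await loop.create_unix_server(EchoProtocol, path)
    async with server:
        await server.serve_forever()

# Run with, e.g., asyncio.run(_echo_main())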
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-argument, import-outside-toplevel """Utility to compile CoreML models""" import os import shutil import tvm._ffi from ...relay.expr_functor import ExprVisitor from .. import xcode, coreml_runtime def _convert_add(builder, name, inputs, outputs, args, attrs): builder.add_elementwise(name=name, input_names=inputs, output_name=outputs[0], mode="ADD") def _convert_multiply(builder, name, inputs, outputs, args, attrs): builder.add_elementwise(name=name, input_names=inputs, output_name=outputs[0], mode="MULTIPLY") def _convert_clip(builder, name, inputs, outputs, args, attrs): builder.add_clip( name=name, input_name=inputs[0], output_name=outputs[0], min_value=attrs.a_min, max_value=attrs.a_max, ) def _convert_batch_flatten(builder, name, inputs, outputs, args, attrs): builder.add_flatten_to_2d(name=name, input_name=inputs[0], output_name=outputs[0]) def _convert_expand_dims(builder, name, inputs, outputs, args, attrs): if attrs.axis >= 0: axes = list(range(attrs.axis, attrs.axis + attrs.num_newaxis)) else: axes = list(range(attrs.axis - attrs.num_newaxis + 1, attrs.axis + 1)) builder.add_expand_dims(name=name, input_name=inputs[0], output_name=outputs[0], axes=axes) def _convert_relu(builder, name, inputs, outputs, args, attrs): builder.add_activation( name=name, non_linearity="RELU", input_name=inputs[0], output_name=outputs[0] ) def _convert_softmax(builder, name, inputs, outputs, args, attrs): builder.add_softmax_nd( name=name, input_name=inputs[0], output_name=outputs[0], axis=int(attrs["axis"]) ) def _convert_conv2d(builder, name, inputs, outputs, args, attrs): weight = args[1].data.asnumpy() if attrs["kernel_layout"] == "OIHW": # convert to 'HWIO' weight = weight.transpose([2, 3, 1, 0]) kh, kw, kc, oc = weight.shape builder.add_convolution( name=name, kernel_channels=kc, output_channels=oc, height=kh, width=kw, stride_height=int(attrs["strides"][0]), stride_width=int(attrs["strides"][0]), border_mode="valid", groups=int(attrs["groups"]), W=weight, b=None, has_bias=False, input_name=inputs[0], output_name=outputs[0], dilation_factors=[int(v) for v in attrs["dilation"]], padding_top=int(attrs["padding"][0]), padding_bottom=int(attrs["padding"][2]), padding_left=int(attrs["padding"][1]), padding_right=int(attrs["padding"][3]), ) def _convert_global_avg_pool2d(builder, name, inputs, outputs, args, attrs): builder.add_pooling( name=name, height=1, width=1, stride_height=1, stride_width=1, layer_type="AVERAGE", padding_type="VALID", input_name=inputs[0], output_name=outputs[0], is_global=True, ) _convert_map = { "add": _convert_add, "multiply": _convert_multiply, "clip": _convert_clip, "expand_dims": _convert_expand_dims, "nn.relu": _convert_relu, "nn.batch_flatten": _convert_batch_flatten, 
"nn.softmax": _convert_softmax, "nn.conv2d": _convert_conv2d, "nn.global_avg_pool2d": _convert_global_avg_pool2d, } class CodegenCoreML(ExprVisitor): """ A visitor to traverse subgraphs and build Core ML models. """ def __init__(self, model_name, function): import coremltools from coremltools.models.neural_network import NeuralNetworkBuilder ExprVisitor.__init__(self) self.model_name = model_name self.function = function self.out_map = {} self.model_inputs_ = [] self.buf_idx_ = 0 # Update inputs and outputs after we visit all the nodes. # Set dummy values for now. # TODO: support multiple outputs inputs = [ ( "", coremltools.models.datatypes.Array( 1, ), ) for _ in self.function.params ] outputs = [ ( "", coremltools.models.datatypes.Array( 1, ), ) ] self.builder = NeuralNetworkBuilder(inputs, outputs, disable_rank5_shape_mapping=True) def visit_constant(self, const): output = "buf_" + str(self.buf_idx_) self.builder.add_load_constant_nd( name=output, output_name=output, constant_value=const.data.asnumpy(), shape=const.data.shape, ) self.buf_idx_ = self.buf_idx_ + 1 self.out_map[const] = [output] def visit_var(self, var): name = var.name_hint shape = [int(n) for n in var.type_annotation.shape] dtype = var.type_annotation.dtype self.model_inputs_.append((name, shape, dtype)) self.out_map[var] = [name] def visit_call(self, call): inputs = [] for arg in call.args: super().visit(arg) for out in self.out_map[arg]: inputs.append(out) outputs = ["buf_" + str(self.buf_idx_)] op_name = call.op.name layer_name = op_name + "_" + str(self.buf_idx_) assert op_name in _convert_map, "{} is not supported".format(op_name) _convert_map[op_name](self.builder, layer_name, inputs, outputs, call.args, call.attrs) self.buf_idx_ = self.buf_idx_ + 1 self.out_map[call] = outputs def compile(self, out_dir): """ Build a Core ML model and compile it with Xcode toolchain. """ import coremltools from coremltools.proto.Model_pb2 import ArrayFeatureType FEATURE_TYPE_MAP = { "float32": ArrayFeatureType.FLOAT32, "float64": ArrayFeatureType.DOUBLE, "int32": ArrayFeatureType.INT32, } input_names, input_dims, input_dtypes = zip(*self.model_inputs_) self.builder.set_input(input_names, input_dims) for i, dtype in enumerate(input_dtypes): assert dtype in FEATURE_TYPE_MAP input_desc = self.builder.spec.description.input input_desc[i].type.multiArrayType.dataType = FEATURE_TYPE_MAP[dtype] output_dim = [int(n) for n in self.function.ret_type.shape] self.builder.set_output(self.out_map[self.function.body], [output_dim]) for i, dtype in enumerate([self.function.ret_type.dtype]): assert dtype in FEATURE_TYPE_MAP output_desc = self.builder.spec.description.output output_desc[i].type.multiArrayType.dataType = FEATURE_TYPE_MAP[dtype] model = coremltools.models.MLModel(self.builder.spec) xcode.compile_coreml(model, self.model_name, out_dir) @tvm._ffi.register_func("relay.ext.coremlcompiler") def coreml_compiler(func): """ Create a CoreML runtime from a Relay module. """ assert isinstance(func, tvm.relay.function.Function) model_dir = os.getcwd() name = str(func.attrs.global_symbol) builder = CodegenCoreML(name, func) builder.visit(func.body) mlmodelc_path = "{}/{}.mlmodelc".format(model_dir, name) if os.path.exists(mlmodelc_path): shutil.rmtree(mlmodelc_path) builder.compile(model_dir) ctx = tvm.cpu(0) return coreml_runtime.create(name, mlmodelc_path, ctx).module
from neo.Prompt.Commands.Wallet import CommandWallet from neo.Prompt.Commands.tests.test_wallet_commands import UserWalletTestCaseBase from neo.Implementations.Wallets.peewee.UserWallet import UserWallet from neo.Prompt.PromptData import PromptData from neo.Prompt.Commands.WalletAddress import SplitUnspentCoin, CreateAddress from neo.Core.TX.Transaction import ContractTransaction from neo.Core.Fixed8 import Fixed8 from mock import patch from io import StringIO from neo.Network.nodemanager import NodeManager from neo.Network.node import NeoNode class UserWalletTestCase(UserWalletTestCaseBase): @classmethod def setUpClass(cls): super().setUpClass() def test_wallet_create_address(self): # test wallet create address with no wallet open args = ['address', 'create', 1] res = CommandWallet().execute(args) self.assertFalse(res) self.OpenWallet1() # test wallet create address with no argument args = ['address', 'create'] res = CommandWallet().execute(args) self.assertFalse(res) # test wallet create address with negative number args = ['address', 'create', -1] res = CommandWallet().execute(args) self.assertFalse(res) # test wallet create address successful args = ['address', 'create', 1] res = CommandWallet().execute(args) self.assertTrue(res) self.assertEqual(type(res), UserWallet) self.assertEqual(len(res.Addresses), 2) # Has one address when created. args = ['address', 'create', 7] res = CommandWallet().execute(args) self.assertTrue(res) self.assertEqual(type(res), UserWallet) self.assertEqual(len(res.Addresses), 9) def test_wallet_delete_address(self): # test wallet delete address with no wallet open args = ['address', 'delete'] res = CommandWallet().execute(args) self.assertFalse(res) self.OpenWallet1() # test wallet delete address with no argument args = ['address', 'delete'] res = CommandWallet().execute(args) self.assertFalse(res) # test wallet delete address with invalid address args = ['address', 'delete', '1234'] res = CommandWallet().execute(args) self.assertFalse(res) # test wallet delete address with unknown address args = ['address', 'delete', self.watch_addr_str] res = CommandWallet().execute(args) self.assertFalse(res) # test wallet delete successful self.assertTrue(len(PromptData.Wallet.Addresses), 1) args = ['address', 'delete', PromptData.Wallet.Addresses[0]] res = CommandWallet().execute(args) self.assertTrue(res) self.assertEqual(type(res), bool) self.assertEqual(len(PromptData.Wallet.Addresses), 0) def test_wallet_alias(self): # test wallet alias with no wallet open args = ['address', 'alias', 'AJQ6FoaSXDFzA6wLnyZ1nFN7SGSN2oNTc3', 'mine'] res = CommandWallet().execute(args) self.assertFalse(res) self.OpenWallet1() # test wallet alias with no argument args = ['address', 'alias'] res = CommandWallet().execute(args) self.assertFalse(res) # test wallet alias with 1 argument args = ['address', 'alias', 'AJQ6FoaSXDFzA6wLnyZ1nFN7SGSN2oNTc3'] res = CommandWallet().execute(args) self.assertFalse(res) # verify wallet has no aliases with patch('sys.stdout', new=StringIO()) as mock_print: args = [""] res = CommandWallet().execute(args) self.assertTrue(res) self.assertEqual(len(PromptData.Wallet.NamedAddr), 0) self.assertNotIn("Alias", mock_print.getvalue()) # test wallet alias successful self.assertNotIn('mine', [n.Title for n in PromptData.Wallet.NamedAddr]) args = ['address', 'alias', 'AJQ6FoaSXDFzA6wLnyZ1nFN7SGSN2oNTc3', 'mine'] res = CommandWallet().execute(args) self.assertTrue(res) self.assertIn('mine', [n.Title for n in PromptData.Wallet.NamedAddr]) with patch('sys.stdout', 
new=StringIO()) as mock_print: args = [""] res = CommandWallet().execute(args) self.assertTrue(res) self.assertIn("Alias : mine", mock_print.getvalue()) def test_6_split_unspent(self): wallet = self.GetWallet1(recreate=True) addr = wallet.ToScriptHash('AJQ6FoaSXDFzA6wLnyZ1nFN7SGSN2oNTc3') nodemgr = NodeManager() nodemgr.nodes = [NeoNode(object, object)] with patch('neo.Network.node.NeoNode.relay', return_value=self.async_return(True)): # bad inputs tx = SplitUnspentCoin(None, self.NEO, addr, 0, 2) self.assertEqual(tx, None) tx = SplitUnspentCoin(wallet, self.NEO, addr, 3, 2) self.assertEqual(tx, None) tx = SplitUnspentCoin(wallet, 'bla', addr, 0, 2) self.assertEqual(tx, None) # should be ok with patch('neo.Prompt.Commands.WalletAddress.prompt', return_value=self.wallet_1_pass()): tx = SplitUnspentCoin(wallet, self.NEO, addr, 0, 2) self.assertIsNotNone(tx) # rebuild wallet and try with non-even amount of neo, should be split into integer values of NEO wallet = self.GetWallet1(True) tx = SplitUnspentCoin(wallet, self.NEO, addr, 0, 3) self.assertIsNotNone(tx) self.assertEqual([Fixed8.FromDecimal(17), Fixed8.FromDecimal(17), Fixed8.FromDecimal(16)], [item.Value for item in tx.outputs]) # try with gas wallet = self.GetWallet1(True) tx = SplitUnspentCoin(wallet, self.GAS, addr, 0, 3) self.assertIsNotNone(tx) def test_7_create_address(self): # no wallet res = CreateAddress(None, 1) self.assertFalse(res) wallet = self.GetWallet1(recreate=True) # not specifying a number of addresses res = CreateAddress(wallet, None) self.assertFalse(res) # bad args res = CreateAddress(wallet, "blah") self.assertFalse(res) # negative value res = CreateAddress(wallet, -1) self.assertFalse(res) # should pass res = CreateAddress(wallet, 2) self.assertTrue(res) self.assertEqual(len(wallet.Addresses), 3) class UserWalletSplitTestCase(UserWalletTestCaseBase): @classmethod def setUpClass(cls): super().setUpClass() def test_wallet_split(self): # test wallet split with no wallet open with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("open a wallet", mock_print.getvalue()) self.OpenWallet1() # test wallet split with not enough arguments with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("specify the required parameters", mock_print.getvalue()) # test wallet split with too much arguments with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2', 'too', 'much'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Too many parameters supplied", mock_print.getvalue()) # test wallet split with invalid address with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', '123', 'neo', '0', '2'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Invalid address specified", mock_print.getvalue()) # test wallet split with unknown asset with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'unknownasset', '0', '2'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Unknown asset id", mock_print.getvalue()) # test wallet split with invalid index with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', 'abc', '2'] res = CommandWallet().execute(args) 
self.assertIsNone(res) self.assertIn("Invalid unspent index value", mock_print.getvalue()) # test wallet split with invalid divisions with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', 'abc'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Invalid divisions value", mock_print.getvalue()) # test wallet split with invalid divisions (negative) with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '-3'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Divisions cannot be lower than 2", mock_print.getvalue()) # test wallet split with invalid fee with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2', 'abc'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Invalid fee value", mock_print.getvalue()) # test wallet split with negative fee with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2', '-0.01'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Invalid fee value", mock_print.getvalue()) # test wallet split with wrong password with patch('neo.Prompt.Commands.WalletAddress.prompt', side_effect=["wrong_password"]): with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("incorrect password", mock_print.getvalue()) # test wallet split with keyboard interrupt with patch('neo.Prompt.Commands.WalletAddress.prompt', side_effect=[KeyboardInterrupt]): with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Splitting cancelled", mock_print.getvalue()) # test wallet split with fee bigger than the outputs with patch('neo.Prompt.Commands.WalletAddress.prompt', side_effect=[self.wallet_1_pass()]): with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2', '100'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Fee could not be subtracted from outputs", mock_print.getvalue()) # # test wallet split with error during tx relay nodemgr = NodeManager() nodemgr.reset_for_test() nodemgr.nodes = [NeoNode(object, object)] with patch('neo.Prompt.Commands.WalletAddress.prompt', side_effect=[self.wallet_1_pass()]): with patch('neo.Network.node.NeoNode.relay', return_value=self.async_return(False)): with patch('sys.stdout', new=StringIO()) as mock_print: args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2'] res = CommandWallet().execute(args) self.assertIsNone(res) self.assertIn("Could not relay tx", mock_print.getvalue()) # we have to clear the mempool because the previous test alread put a TX with the same hash in the mempool and so it will not try to relay again nodemgr.mempool.reset() # test wallet split neo successful with patch('neo.Prompt.Commands.WalletAddress.prompt', side_effect=[self.wallet_1_pass()]): with patch('neo.Network.node.NeoNode.relay', return_value=self.async_return(True)): args = ['address', 'split', self.wallet_1_addr, 'neo', '0', '2'] tx = CommandWallet().execute(args) self.assertIsInstance(tx, ContractTransaction) self.assertEqual([Fixed8.FromDecimal(25), 
                                  Fixed8.FromDecimal(25)],
                                 [item.Value for item in tx.outputs])

        # test wallet split gas successful
        with patch('neo.Prompt.Commands.WalletAddress.prompt', side_effect=[self.wallet_1_pass()]):
            with patch('neo.Network.node.NeoNode.relay', return_value=self.async_return(True)):
                args = ['address', 'split', self.wallet_1_addr, 'gas', '0', '3']
                tx = CommandWallet().execute(args)

                self.assertIsInstance(tx, ContractTransaction)
                self.assertEqual(len(tx.outputs), 3)

        nodemgr.reset_for_test()
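# The split tests above expect 50 NEO divided three ways to come back as
# 17/17/16 and two ways as 25/25. A stand-alone sketch of that arithmetic
# (an illustration of the expected output values, not the wallet code itself):
def _split_amount(total, divisions):
    base, remainder = divmod(total, divisions)
    return [base + 1 if i < remainder else base for i in range(divisions)]


assert _split_amount(50, 3) == [17, 17, 16]
assert _split_amount(50, 2) == [25, 25]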
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for registration mechanisms.""" from tensorflow.python.framework import tensor_shape from tensorflow.python.ops.linalg import cholesky_registrations # pylint: disable=unused-import from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import matmul_registrations # pylint: disable=unused-import from tensorflow.python.ops.linalg import solve_registrations # pylint: disable=unused-import from tensorflow.python.platform import test # pylint: disable=protected-access _ADJOINTS = linear_operator_algebra._ADJOINTS _registered_adjoint = linear_operator_algebra._registered_adjoint _CHOLESKY_DECOMPS = linear_operator_algebra._CHOLESKY_DECOMPS _registered_cholesky = linear_operator_algebra._registered_cholesky _INVERSES = linear_operator_algebra._INVERSES _registered_inverse = linear_operator_algebra._registered_inverse _MATMUL = linear_operator_algebra._MATMUL _registered_matmul = linear_operator_algebra._registered_matmul _SOLVE = linear_operator_algebra._SOLVE _registered_solve = linear_operator_algebra._registered_solve # pylint: enable=protected-access class AdjointTest(test.TestCase): def testRegistration(self): class CustomLinOp(linear_operator.LinearOperator): def _matmul(self, a): pass def _shape(self): return tensor_shape.TensorShape([1, 1]) def _shape_tensor(self): pass # Register Adjoint to a lambda that spits out the name parameter @linear_operator_algebra.RegisterAdjoint(CustomLinOp) def _adjoint(a): # pylint: disable=unused-argument,unused-variable return "OK" self.assertEqual("OK", CustomLinOp(dtype=None).adjoint()) def testRegistrationFailures(self): class CustomLinOp(linear_operator.LinearOperator): pass with self.assertRaisesRegex(TypeError, "must be callable"): linear_operator_algebra.RegisterAdjoint(CustomLinOp)("blah") # First registration is OK linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None) # Second registration fails with self.assertRaisesRegex(ValueError, "has already been registered"): linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None) def testExactAdjointRegistrationsAllMatch(self): for (k, v) in _ADJOINTS.items(): self.assertEqual(v, _registered_adjoint(k[0])) class CholeskyTest(test.TestCase): def testRegistration(self): class CustomLinOp(linear_operator.LinearOperator): def _matmul(self, a): pass def _shape(self): return tensor_shape.TensorShape([1, 1]) def _shape_tensor(self): pass # Register Cholesky to a lambda that spits out the name parameter @linear_operator_algebra.RegisterCholesky(CustomLinOp) def _cholesky(a): # pylint: disable=unused-argument,unused-variable return "OK" with self.assertRaisesRegex(ValueError, "positive definite"): CustomLinOp(dtype=None, is_self_adjoint=True).cholesky() with self.assertRaisesRegex(ValueError, "self 
adjoint"): CustomLinOp(dtype=None, is_positive_definite=True).cholesky() custom_linop = CustomLinOp( dtype=None, is_self_adjoint=True, is_positive_definite=True) self.assertEqual("OK", custom_linop.cholesky()) def testRegistrationFailures(self): class CustomLinOp(linear_operator.LinearOperator): pass with self.assertRaisesRegex(TypeError, "must be callable"): linear_operator_algebra.RegisterCholesky(CustomLinOp)("blah") # First registration is OK linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None) # Second registration fails with self.assertRaisesRegex(ValueError, "has already been registered"): linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None) def testExactCholeskyRegistrationsAllMatch(self): for (k, v) in _CHOLESKY_DECOMPS.items(): self.assertEqual(v, _registered_cholesky(k[0])) class MatmulTest(test.TestCase): def testRegistration(self): class CustomLinOp(linear_operator.LinearOperator): def _matmul(self, a): pass def _shape(self): return tensor_shape.TensorShape([1, 1]) def _shape_tensor(self): pass # Register Matmul to a lambda that spits out the name parameter @linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp) def _matmul(a, b): # pylint: disable=unused-argument,unused-variable return "OK" custom_linop = CustomLinOp( dtype=None, is_self_adjoint=True, is_positive_definite=True) self.assertEqual("OK", custom_linop.matmul(custom_linop)) def testRegistrationFailures(self): class CustomLinOp(linear_operator.LinearOperator): pass with self.assertRaisesRegex(TypeError, "must be callable"): linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp)("blah") # First registration is OK linear_operator_algebra.RegisterMatmul( CustomLinOp, CustomLinOp)(lambda a: None) # Second registration fails with self.assertRaisesRegex(ValueError, "has already been registered"): linear_operator_algebra.RegisterMatmul( CustomLinOp, CustomLinOp)(lambda a: None) def testExactMatmulRegistrationsAllMatch(self): for (k, v) in _MATMUL.items(): self.assertEqual(v, _registered_matmul(k[0], k[1])) class SolveTest(test.TestCase): def testRegistration(self): class CustomLinOp(linear_operator.LinearOperator): def _matmul(self, a): pass def _solve(self, a): pass def _shape(self): return tensor_shape.TensorShape([1, 1]) def _shape_tensor(self): pass # Register Solve to a lambda that spits out the name parameter @linear_operator_algebra.RegisterSolve(CustomLinOp, CustomLinOp) def _solve(a, b): # pylint: disable=unused-argument,unused-variable return "OK" custom_linop = CustomLinOp( dtype=None, is_self_adjoint=True, is_positive_definite=True) self.assertEqual("OK", custom_linop.solve(custom_linop)) def testRegistrationFailures(self): class CustomLinOp(linear_operator.LinearOperator): pass with self.assertRaisesRegex(TypeError, "must be callable"): linear_operator_algebra.RegisterSolve(CustomLinOp, CustomLinOp)("blah") # First registration is OK linear_operator_algebra.RegisterSolve( CustomLinOp, CustomLinOp)(lambda a: None) # Second registration fails with self.assertRaisesRegex(ValueError, "has already been registered"): linear_operator_algebra.RegisterSolve( CustomLinOp, CustomLinOp)(lambda a: None) def testExactSolveRegistrationsAllMatch(self): for (k, v) in _SOLVE.items(): self.assertEqual(v, _registered_solve(k[0], k[1])) class InverseTest(test.TestCase): def testRegistration(self): class CustomLinOp(linear_operator.LinearOperator): def _matmul(self, a): pass def _shape(self): return tensor_shape.TensorShape([1, 1]) def _shape_tensor(self): pass # Register 
Inverse to a lambda that spits out the name parameter
    @linear_operator_algebra.RegisterInverse(CustomLinOp)
    def _inverse(a):  # pylint: disable=unused-argument,unused-variable
      return "OK"

    with self.assertRaisesRegex(ValueError, "singular"):
      CustomLinOp(dtype=None, is_non_singular=False).inverse()

    self.assertEqual("OK", CustomLinOp(
        dtype=None, is_non_singular=True).inverse())

  def testRegistrationFailures(self):

    class CustomLinOp(linear_operator.LinearOperator):
      pass

    with self.assertRaisesRegex(TypeError, "must be callable"):
      linear_operator_algebra.RegisterInverse(CustomLinOp)("blah")

    # First registration is OK
    linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)

    # Second registration fails
    with self.assertRaisesRegex(ValueError, "has already been registered"):
      linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)

  def testExactRegistrationsAllMatch(self):
    for (k, v) in _INVERSES.items():
      self.assertEqual(v, _registered_inverse(k[0]))


if __name__ == "__main__":
  test.main()
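# A stand-alone sketch of the registry pattern these tests exercise: a
# decorator stores one function per operator type pair and refuses duplicate
# registrations, which is what the "must be callable" and "has already been
# registered" assertions above check. Names here are illustrative, not
# TensorFlow's internals.
_DEMO_REGISTRY = {}


def register_pair(type_a, type_b):
  def decorator(fn):
    if not callable(fn):
      raise TypeError("fn must be callable")
    key = (type_a, type_b)
    if key in _DEMO_REGISTRY:
      raise ValueError(f"{key} has already been registered")
    _DEMO_REGISTRY[key] = fn
    return fn
  return decorator


def dispatch_pair(a, b):
  # Look up the implementation registered for the concrete operand types.
  return _DEMO_REGISTRY[(type(a), type(b))](a, b)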
""" borrowed from jython https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py """ import errno import os class error(Exception): pass ALL = None _exception_map = {} def _map_exception(exc, circumstance=ALL): try: mapped_exception = _exception_map[(exc.__class__, circumstance)] mapped_exception.java_exception = exc return mapped_exception except KeyError: return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance)) POLLIN = 1 POLLOUT = 2 # The following event types are completely ignored on jython # Java does not support them, AFAICT # They are declared only to support code compatibility with cpython POLLPRI = 4 POLLERR = 8 POLLHUP = 16 POLLNVAL = 32 def _getselectable(selectable_object): try: channel = selectable_object.getchannel() except: try: channel = selectable_object.fileno().getChannel() except: raise TypeError("Object '%s' is not watchable" % selectable_object, errno.ENOTSOCK) return channel # Fake Selector class Selector: def close(self): pass def keys(self): return [] def select(self, timeout=None): return [] def selectedKeys(self): class SelectedKeys: def iterator(self): return [] return SelectedKeys() def selectNow(self, timeout=None): return [] class poll: def __init__(self): self.selector = Selector() self.chanmap = {} self.unconnected_sockets = [] def _register_channel(self, socket_object, channel, mask): jmask = 0 if mask & POLLIN: # Note that OP_READ is NOT a valid event on server socket channels. if channel.validOps() & OP_ACCEPT: jmask = OP_ACCEPT else: jmask = OP_READ if mask & POLLOUT: if channel.validOps() & OP_WRITE: jmask |= OP_WRITE if channel.validOps() & OP_CONNECT: jmask |= OP_CONNECT selectionkey = channel.register(self.selector, jmask) self.chanmap[channel] = (socket_object, selectionkey) def _check_unconnected_sockets(self): temp_list = [] for socket_object, mask in self.unconnected_sockets: channel = _getselectable(socket_object) if channel is not None: self._register_channel(socket_object, channel, mask) else: temp_list.append( (socket_object, mask) ) self.unconnected_sockets = temp_list def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI): try: channel = _getselectable(socket_object) if channel is None: # The socket is not yet connected, and thus has no channel # Add it to a pending list, and return self.unconnected_sockets.append( (socket_object, mask) ) return self._register_channel(socket_object, channel, mask) except BaseException as exc: raise _map_exception(exc) def unregister(self, socket_object): try: channel = _getselectable(socket_object) self.chanmap[channel][1].cancel() del self.chanmap[channel] except BaseException as exc: raise _map_exception(exc) def _dopoll(self, timeout): if timeout is None or timeout < 0: self.selector.select() else: try: timeout = int(timeout) if not timeout: self.selector.selectNow() else: # No multiplication required: both cpython and java use millisecond timeouts self.selector.select(timeout) except ValueError as vx: raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL) # The returned selectedKeys cannot be used from multiple threads! return self.selector.selectedKeys() def poll(self, timeout=None): return [] def _deregister_all(self): try: for k in self.selector.keys(): k.cancel() # Keys are not actually removed from the selector until the next select operation. 
self.selector.selectNow() except BaseException as exc: raise _map_exception(exc) def close(self): try: self._deregister_all() self.selector.close() except BaseException as exc: raise _map_exception(exc) def _calcselecttimeoutvalue(value): if value is None: return None try: floatvalue = float(value) except Exception as x: raise TypeError("Select timeout value must be a number or None") if value < 0: raise error("Select timeout value cannot be negative", errno.EINVAL) if floatvalue < 0.000001: return 0 return int(floatvalue * 1000) # Convert to milliseconds # This cache for poll objects is required because of a bug in java on MS Windows # http://bugs.jython.org/issue1291 class poll_object_cache: def __init__(self): self.is_windows = os.name == 'nt' if self.is_windows: self.poll_object_queue = Queue.Queue() import atexit atexit.register(self.finalize) def get_poll_object(self): if not self.is_windows: return poll() try: return self.poll_object_queue.get(False) except Queue.Empty: return poll() def release_poll_object(self, pobj): if self.is_windows: pobj._deregister_all() self.poll_object_queue.put(pobj) else: pobj.close() def finalize(self): if self.is_windows: while True: try: p = self.poll_object_queue.get(False) p.close() except Queue.Empty: return _poll_object_cache = poll_object_cache() def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None): timeout = _calcselecttimeoutvalue(timeout) # First create a poll object to do the actual watching. pobj = _poll_object_cache.get_poll_object() try: registered_for_read = {} # Check the read list for fd in read_fd_list: pobj.register(fd, POLLIN) registered_for_read[fd] = 1 # And now the write list for fd in write_fd_list: if fd in registered_for_read: # registering a second time overwrites the first pobj.register(fd, POLLIN|POLLOUT) else: pobj.register(fd, POLLOUT) results = pobj.poll(timeout) # Now start preparing the results read_ready_list, write_ready_list, oob_ready_list = [], [], [] for fd, mask in results: if mask & POLLIN: read_ready_list.append(fd) if mask & POLLOUT: write_ready_list.append(fd) return read_ready_list, write_ready_list, oob_ready_list finally: _poll_object_cache.release_poll_object(pobj) select = native_select def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None): # First turn all sockets to non-blocking # keeping track of which ones have changed modified_channels = [] try: for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]: for s in socket_list: channel = _getselectable(s) if channel.isBlocking(): modified_channels.append(channel) channel.configureBlocking(0) return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout) finally: for channel in modified_channels: channel.configureBlocking(1)
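# Illustrative usage of the poll shim above, mirroring the CPython select.poll()
# API this module emulates. `sock` is assumed to be a connected Jython socket;
# note that poll.poll() above is stubbed to return an empty list, so this shows
# the calling convention rather than live readiness events.
def _poll_example(sock, timeout_ms=500):
    p = _poll_object_cache.get_poll_object()
    try:
        p.register(sock, POLLIN | POLLOUT)
        ready = p.poll(timeout_ms)   # timeout in milliseconds
        for fd, event in ready:
            if event & POLLIN:
                fd.recv(4096)
        p.unregister(sock)
    finally:
        _poll_object_cache.release_poll_object(p)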
""" Module for support V_sim ascii fileformat Contains routines to load .ascii files and create pychemia Structure objects and to save back to .ascii files This code was originally created for ASE """ import re as _re from pychemia.utils.constants import bohr_angstrom from pychemia.core import Structure def load(filep): """ Read an V_sim .ascii file and returns a pychemia Structure object Args: filep: (string) Path to a .ascii file or an actual file-like object Returns: struct: (object) A pychemia Structure object """ if isinstance(filep, str): f = open(filep) else: # Assume it's a file-like object f = filep comment = f.readline() line = f.readline() + ' ' + f.readline() box = line.split() for i in range(len(box)): box[i] = float(box[i]) keywords = [] positions = [] symbols = [] re_comment = _re.compile('^\s*[#!]') re_node = _re.compile('^\s*\S+\s+\S+\s+\S+\s+\S+') while True: line = f.readline() if line == '': break # EOF -> Exit p = re_comment.match(line) if p is not None: # remove comment character at the beginning of line line = line[p.end():].replace(',', ' ').lower() if line[:8] == "keyword:": keywords.extend(line[8:].split()) elif re_node.match(line): unit = 1.0 if not ("reduced" in keywords): if ("bohr" in keywords) or ("bohrd0" in keywords) or ("atomic" in keywords) or ("atomicd0" in keywords): unit = bohr_angstrom fields = line.split() positions.append([unit * float(fields[0]), unit * float(fields[1]), unit * float(fields[2])]) symbols.append(fields[3]) f.close() if ("surface" in keywords) or ("freeBC" in keywords): raise NotImplementedError # create atoms object based on the information if "angdeg" in keywords: cell = cell.par_to_cell(box) else: unit = 1.0 if ("bohr" in keywords) or ("bohrd0" in keywords) or ("atomic" in keywords) or ("atomicd0" in keywords): unit = bohr_angstrom cell = [[unit * box[0], 0.0, 0.0], [unit * box[1], unit * box[2], 0.0], [unit * box[3], unit * box[4], unit * box[5]]] if "reduced" in keywords: struct = Structure(cell=cell, reduced=positions, symbols=symbols, name=comment) else: struct = Structure(cell=cell, positions=positions, symbols=symbols, name=comment) return struct def save(struct, filep, cartesian=True, long_format=True, angdeg=False): """ Saves a pychemia Structure object in V_sim .ascii fileformat in the simplest way, i.e. using all defaults with no optional keywords. In the first line we add the number of atoms, as this is used by certain code """ if isinstance(filep, str): f = open(filep, 'w') else: # Assume it's a 'file-like object' f = filep # write header (treated as a comment by v_sim f.write("%s\n" % struct.name) # write cell cell = struct.cell if angdeg: ddd = cell_to_cellpar(cell) else: ddd = cell_to_reduced(cell) f.write("%.14f %.14f %.14f\n" % (ddd[0], ddd[1], ddd[2])) f.write("%.14f %.14f %.14f\n" % (ddd[3], ddd[4], ddd[5])) if angdeg: f.write("#keyword: angdeg\n") # Write atom positions in scaled or cartesian coordinates if cartesian: coord = struct.positions else: f.write("#keyword: reduced\n") coord = struct.reduced if long_format: cform = ' %19.16f' else: cform = ' %9.6f' symbols = struct.symbols for iatom, atom in enumerate(coord): f.write(' ') for dcoord in atom: f.write(cform % dcoord) f.write(' ' + symbols[iatom] + '\n') def cell_to_reduced(full): """ Transforms the given matrix full into a reduced array used by V_Sim to store box definition. 
translated from src/coreTools/toolMatrix.c subroutine tool_matrix_reducePrimitiveVectors """ from numpy import zeros from numpy.linalg import norm xcoord = full[0].copy() # Compute the Y vector of the new basis, orthogonal to xcoord an coplanar with xcoord and old y vector u = zeros(3) u[0] = full[0][1] * full[1][2] - full[0][2] * full[1][1] u[1] = full[0][2] * full[1][0] - full[0][0] * full[1][2] u[2] = full[0][0] * full[1][1] - full[0][1] * full[1][0] deltaij = xcoord[0] * u[1] - xcoord[1] * u[0] if deltaij != 0.0: i = 0 j = 1 k = 2 else: deltaij = xcoord[0] * u[2] - xcoord[2] * u[0] if deltaij != 0.0: i = 0 j = 2 k = 1 else: deltaij = xcoord[1] * u[2] - xcoord[2] * u[1] if deltaij != 0.0: i = 1 j = 2 k = 0 else: # Error return None y = zeros(3) y[k] = -1.0 y[i] = (xcoord[k] * u[j] - xcoord[j] * u[k]) / deltaij y[j] = (xcoord[i] * u[k] - xcoord[k] * u[i]) / deltaij # We need to turn Y if y.Y is negative fnorm = full[1][0] * y[0] + full[1][1] * y[1] + full[1][2] * y[2] if fnorm < 0.0: y *= -1. # Compute the new Z vector in order to form a direct orthogonal # basis with xcoord and Y z = zeros(3) z[0] = xcoord[1] * y[2] - xcoord[2] * y[1] z[1] = xcoord[2] * y[0] - xcoord[0] * y[2] z[2] = xcoord[0] * y[1] - xcoord[1] * y[0] # Normalize vectors xcoord /= norm(xcoord) y /= norm(y) z /= norm(z) # Compute the reduce value for the basis. reduced = zeros(6) reduced[0] = xcoord[0] * full[0][0] + xcoord[1] * full[0][1] + xcoord[2] * full[0][2] reduced[1] = xcoord[0] * full[1][0] + xcoord[1] * full[1][1] + xcoord[2] * full[1][2] reduced[2] = y[0] * full[1][0] + y[1] * full[1][1] + y[2] * full[1][2] reduced[3] = xcoord[0] * full[2][0] + xcoord[1] * full[2][1] + xcoord[2] * full[2][2] reduced[4] = y[0] * full[2][0] + y[1] * full[2][1] + y[2] * full[2][2] reduced[5] = z[0] * full[2][0] + z[1] * full[2][1] + z[2] * full[2][2] return reduced
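
# --- Usage sketch (added illustration, not part of the original module) ---
# A hypothetical round trip: write a structure to .ascii and read it back.
# The constructor arguments mirror the ones load() uses above; the cell is
# passed as a numpy array because cell_to_reduced() indexes it row-wise.
if __name__ == '__main__':
    import numpy as np
    st = Structure(cell=np.diag([4.0, 4.0, 4.0]),
                   positions=[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]],
                   symbols=['Na', 'Cl'], name='rocksalt sketch')
    save(st, 'NaCl.ascii', cartesian=True)
    st2 = load('NaCl.ascii')
    print(st2.symbols)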
import boto
import datetime
import logging
import time

from boto.ec2.autoscale import AutoScaleConnection
from boto.ec2.autoscale import Tag  # needed for Phantom
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.regioninfo import RegionInfo

from lib.config import CloudConfig
from lib.config import VALID_RUN_STATES

# Suppress most boto logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
LOG = logging.getLogger(__name__)


class Cloud(object):
    def __init__(self, cloud_config):
        self.config = cloud_config
        self.all_instances = []
        self.failed_launch = False
        self.failed_count = 0
        self.failed_last_valid_count = 0
        self._conn = None
        self._as_conn = None
        self._lc = None
        self._asg = None
        self._last_asg_launch_attempt = None
        self.maxed = False
        self._last_launch_attempt = datetime.datetime.utcnow()
        self._initialize()

    def _create_connection(self):
        LOG.debug("Creating connection for %s" % self.config.name)
        self._conn = boto.connect_ec2(self.config.access_id,
                                      self.config.secret_key,
                                      validate_certs=False)
        self._conn.host = self.config.cloud_uri
        self._conn.port = self.config.cloud_port

    def _create_autoscale_connection(self):
        LOG.debug("Creating autoscale connection for %s" % self.config.name)
        region = RegionInfo(name=self.config.cloud_type,
                            endpoint=self.config.as_uri)
        self._as_conn = AutoScaleConnection(
            aws_access_key_id=self.config.access_id,
            aws_secret_access_key=self.config.secret_key,
            is_secure=True,
            port=self.config.as_port,
            region=region,
            validate_certs=False)

    def _create_or_set_launch_configuration(self):
        name = self.config.lc_name
        if not self._lc:
            LOG.debug("Attempting to load launch configuration: %s" % (name))
            lc = self._as_conn.get_all_launch_configurations(names=[name])
            if len(lc) == 1:
                LOG.debug("Launch configuration %s found." % (name))
                self._lc = lc[0]
        if not self._lc:
            # TODO(pdmars): key and security groups are hardcoded for now, gross
            if self.config.user_data_file is not None:
                user_data_file = self.config.user_data_file
                with open(user_data_file) as f:
                    user_data = f.read()
            else:
                user_data = None
            LOG.debug("Creating launch configuration %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\timage_id: %s" % self.config.image_id)
            LOG.debug("\tinstance_type: %s" % self.config.instance_type)
            LOG.debug("\tuser_data: %s" % user_data)
            self._lc = LaunchConfiguration(
                name=name,
                image_id=self.config.image_id,
                key_name="phantomkey",
                security_groups=['default'],
                instance_type=self.config.instance_type,
                user_data=user_data)
            self._as_conn.create_launch_configuration(self._lc)

    def _create_or_set_autoscale_group(self):
        name = self.config.asg_name
        if not self._asg:
            LOG.debug("Attempting to load autoscale group: %s" % name)
            asg = self._as_conn.get_all_groups(names=[name])
            LOG.debug("Autoscale group: %s" % asg)
            if len(asg) == 1:
                LOG.debug("Autoscale group %s found." % name)
                self._asg = asg[0]
        if not self._asg:
            # TODO(pdmars): more hard coded grossness, for now
            try:
                cloud_guess = self.config.lc_name.split("@")[1].strip()
            except Exception:
                LOG.warn("Unable to guess cloud for auto scale tags")
                LOG.warn("Setting cloud to hotel")
                cloud_guess = "hotel"
            policy_name_key = "PHANTOM_DEFINITION"
            policy_name = "error_overflow_n_preserving"
            ordered_clouds_key = "clouds"
            n_preserve_key = "minimum_vms"
            ordered_clouds = cloud_guess + ":-1"
            n_preserve = 0
            policy_tag = Tag(connection=self._as_conn, key=policy_name_key,
                             value=policy_name, resource_id=name)
            clouds_tag = Tag(connection=self._as_conn, key=ordered_clouds_key,
                             value=ordered_clouds, resource_id=name)
            npreserve_tag = Tag(connection=self._as_conn, key=n_preserve_key,
                                value=n_preserve, resource_id=name)
            tags = [policy_tag, clouds_tag, npreserve_tag]
            zones = [self.config.az]
            LOG.debug("Creating autoscale group %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\tavailability_zones: %s" % zones)
            LOG.debug("\tlaunch_config: %s" % self._lc)
            self._asg = AutoScalingGroup(group_name=name,
                                         availability_zones=zones,
                                         min_size=0, max_size=0,
                                         launch_config=self._lc, tags=tags)
            self._as_conn.create_auto_scaling_group(self._asg)

    def _initialize(self):
        LOG.debug("Initializing %s" % self.config.name)
        self._create_connection()
        self._create_autoscale_connection()
        self._create_or_set_launch_configuration()
        self._create_or_set_autoscale_group()
        LOG.debug("Initialization complete for %s" % self.config.name)

    def get_valid_instances(self):
        return self.all_instances

    def _refresh_instances(self):
        LOG.debug("%s: getting instance information" % self.config.name)
        self.all_instances = []
        instances = []
        as_instances = self._asg.instances
        as_instance_ids = [i.instance_id for i in as_instances]
        reservations = self._conn.get_all_instances()
        for reservation in reservations:
            for instance in reservation.instances:
                if instance.id in as_instance_ids:
                    if instance.state in VALID_RUN_STATES:
                        instances.append(instance)
        for instance in instances:
            self.all_instances.append(instance)
        num_instances = len(self.all_instances)
        LOG.debug("%s: updated %d instances" % (self.config.name,
                                                num_instances))
        if num_instances >= self.config.max_instances:
            LOG.warn("%s reached the max (%s) instances: %s" % (
                self.config.name, self.config.max_instances, num_instances))
            self.maxed = True
        else:
            self.maxed = False

    def _refresh_asg(self):
        LOG.debug("%s: refreshing autoscale group" % self.config.name)
        asg_name = self.config.asg_name
        asgs = self._as_conn.get_all_groups(names=[asg_name])
        if len(asgs) == 1:
            self._asg = asgs[0]
            LOG.debug("\trefreshed autoscale group: %s" % asg_name)
        else:
            LOG.warn("\tunable to refresh autoscale group: %s" % asg_name)

    def refresh(self, cluster):
        self._refresh_asg()
        self._refresh_instances()

    def get_total_num_valid_cores(self):
        LOG.debug("%s: getting number of valid cores" % self.config.name)
        num_valid_instances = len(self.get_valid_instances())
        total_valid_cores = num_valid_instances * self.config.instance_cores
        num_desired_instances = self._asg.desired_capacity
        num_desired_cores = num_desired_instances * self.config.instance_cores
        if num_desired_cores != total_valid_cores:
            LOG.debug("\tmismatching core counts")
            LOG.debug("\tnum_desired_cores: %d" % (num_desired_cores))
            LOG.debug("\ttotal_valid_cores: %d" % (total_valid_cores))
        return total_valid_cores

    def get_instance_by_id(self, id):
        LOG.debug("Searching for instance %s" % id)
        for instance in self.all_instances:
            if instance.id == id:
                LOG.debug("Found instance %s" % id)
                return instance
        return None

    def get_instance_ids_for_public_dns_names(self, public_dns_names):
        instance_ids = []
        for instance in self.all_instances:
            if instance.public_dns_name in public_dns_names:
                instance_ids.append(instance.id)
        return instance_ids

    def get_public_dns_names_close_to_charge(self):
        instances_close_to_charge = []
        sleep_secs = self.config.get_loop_sleep_secs()
        cur_utc_time = datetime.datetime.utcnow()
        valid_instances = self.get_valid_instances()
        time_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
        for instance in valid_instances:
            launch_time = datetime.datetime.strptime(instance.launch_time,
                                                     time_fmt)
            time_diff = cur_utc_time - launch_time
            # Ignores microseconds
            time_diff_secs = time_diff.seconds + time_diff.days * 24 * 3600
            cur_charge_secs = time_diff_secs % self.config.charge_time_secs
            secs_to_charge = self.config.charge_time_secs - cur_charge_secs
            LOG.debug("%s:%s: charge: %d; current: %d; to charge: %d" % (
                instance.id, instance.public_dns_name,
                self.config.charge_time_secs, cur_charge_secs,
                secs_to_charge))
            if secs_to_charge < (3 * sleep_secs):
                instances_close_to_charge.append(instance.public_dns_name)
        return instances_close_to_charge

    def delete_instances(self, instance_ids=None):
        if not instance_ids:
            return
        LOG.debug("Deleting instances: %s" % instance_ids)
        # TODO(pdmars): this has the potential to kill instances running jobs;
        # maybe I should err on the side of having extra instances if the
        # capacity is higher than the cloud can currently support
        num_instances = len(self.all_instances)
        if ((self._asg.desired_capacity > num_instances) and
                (num_instances > 0)):
            LOG.warn("Desired capacity is greater than num_instances running")
            LOG.warn("Adjusting desired capacity to match")
            self.set_capacity(num_instances)
        for instance_id in instance_ids:
            self._as_conn.terminate_instance(instance_id)
            # TODO(pdmars): due to a bug in phantom, maybe this will help
            # 2013/04/05: this might not be relevant anymore
            time.sleep(.1)

    def launch_autoscale_instances(self, num_instances=1):
        new_capacity = self._asg.desired_capacity + int(num_instances)
        if new_capacity > self.config.max_instances:
            new_capacity = self.config.max_instances
            LOG.warn("%s can launch %s total instances" % (self.config.name,
                                                           new_capacity))
        self._last_launch_attempt = datetime.datetime.utcnow()
        LOG.debug("Setting cloud capacity for %s to %s" % (self.config.name,
                                                           new_capacity))
        self.set_capacity(new_capacity)

    def set_capacity(self, new_capacity):
        self._asg.set_capacity(new_capacity)


class Clouds(object):
    def __init__(self, cloud_names, global_config):
        self.cloud_names = cloud_names
        self._global_config = global_config
        self.clouds = {}
        self._clouds_low_to_high = []
        self._instances_out_of_date = []
        self._initialize()

    def _create_cloud_from_config(self, name):
        return Cloud(CloudConfig(name, self._global_config))

    def _get_clouds_ordered_by_price(self, descending=False):
        clouds = self.clouds.values()
        sorted_clouds = sorted(clouds,
                               key=lambda x: x.config.price,
                               reverse=descending)
        return sorted_clouds

    def _initialize(self):
        LOG.debug("Initializing all clouds")
        for name in self.cloud_names:
            c = self._create_cloud_from_config(name)
            self.clouds[name] = c
        LOG.debug("Sorting clouds by price (low to high)")
        self._clouds_low_to_high = self._get_clouds_ordered_by_price()

    def get_cheapest_valid_cloud(self):
        clouds = self._clouds_low_to_high
        for cloud in clouds:
            if (not cloud.failed_launch) and (not cloud.maxed):
                return cloud

    def get_clouds_low_to_high(self):
        return self._clouds_low_to_high

    def get_total_num_valid_cores(self):
total_num_valid_cores = 0 for cloud in self.get_clouds_low_to_high(): total_num_valid_cores += cloud.get_total_num_valid_cores() return total_num_valid_cores def _update_cluster_instances(self, cluster): out_of_date = [] cloud_dns_names = [] clouds = self.get_clouds_low_to_high() for cloud in clouds: for instance in cloud.all_instances: cloud_dns_names.append(instance.public_dns_name) for node in cluster.nodes: if node.public_dns_name not in cloud_dns_names: LOG.debug("%s appears out of date" % node.public_dns_name) if node.public_dns_name in self._instances_out_of_date: out_of_date.append(node.public_dns_name) else: self._instances_out_of_date.append(node.public_dns_name) elif node.public_dns_name in cloud_dns_names: if node.public_dns_name in self._instances_out_of_date: self._instances_out_of_date.remove(node.public_dns_name) LOG.debug("Instances no longer exist, removing: %s" % out_of_date) for public_dns_name in out_of_date: cluster.remove_node(public_dns_name) if public_dns_name in self._instances_out_of_date: self._instances_out_of_date.remove(public_dns_name) LOG.debug("Attempting to add new nodes") for cloud in clouds: for instance in cloud.all_instances: if instance.public_dns_name: cluster.add_node(instance.public_dns_name, cloud.config.instance_cores) def refresh_all(self, cluster): for cloud_name in self.clouds.keys(): self.clouds[cloud_name].refresh(cluster) self._update_cluster_instances(cluster)
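
# --- Usage sketch (added illustration; names are hypothetical) ---
# One pass of a scaling loop driving the classes above. The cloud_names,
# global_config and cluster objects would come from the application's
# configuration layer (lib.config) and scheduler in the real service.
def _demo_scaling_pass(cloud_names, global_config, cluster):
    clouds = Clouds(cloud_names, global_config)
    clouds.refresh_all(cluster)  # sync ASGs, instances and cluster nodes
    LOG.debug("total valid cores: %d" % clouds.get_total_num_valid_cores())
    cheapest = clouds.get_cheapest_valid_cloud()  # lowest price, not maxed/failed
    if cheapest is not None:
        cheapest.launch_autoscale_instances(1)  # bump desired capacity by one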
import numpy as np
import scipy.sparse as sp
import pytest

from re import escape

from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_raises_regexp

from sklearn.utils._mocking import CheckingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import (check_classification_targets,
                                      type_of_target)
from sklearn.utils import check_array
from sklearn.utils import shuffle

from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
                                  Perceptron, LogisticRegression,
                                  SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets

iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3


def test_ovr_exceptions():
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ovr.predict, [])

    # Fail on multioutput data
    assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
                  np.array([[1, 0], [0, 1]]),
                  np.array([[1, 2], [3, 1]]))
    assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
                  np.array([[1, 0], [0, 1]]),
                  np.array([[1.5, 2.4], [3.1, 0.8]]))


def test_check_classification_targets():
    # Test that check_classification_targets rejects this target type,
    # with the detected type in the error message. #5782
    y = np.array([0.0, 1.1, 2.0, 3.0])
    msg = type_of_target(y)
    assert_raise_message(ValueError, msg, check_classification_targets, y)


def test_ovr_fit_predict():
    # A classifier which implements decision_function.
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert len(ovr.estimators_) == n_classes

    clf = LinearSVC(random_state=0)
    pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
    assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)

    # A classifier which implements predict_proba.
    ovr = OneVsRestClassifier(MultinomialNB())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert np.mean(iris.target == pred) > 0.65


def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
    X, y = shuffle(iris.data, iris.target, random_state=0)
    ovr = OneVsRestClassifier(MultinomialNB())
    ovr.partial_fit(X[:100], y[:100], np.unique(y))
    ovr.partial_fit(X[100:], y[100:])
    pred = ovr.predict(X)
    ovr2 = OneVsRestClassifier(MultinomialNB())
    pred2 = ovr2.fit(X, y).predict(X)
    assert_almost_equal(pred, pred2)
    assert len(ovr.estimators_) == len(np.unique(y))
    assert np.mean(y == pred) > 0.65

    # Test when mini-batches don't have all classes
    # with SGDClassifier
    X = np.abs(np.random.randn(14, 2))
    y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]

    ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
                                            shuffle=False, random_state=0))
    ovr.partial_fit(X[:7], y[:7], np.unique(y))
    ovr.partial_fit(X[7:], y[7:])
    pred = ovr.predict(X)
    ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
                                             shuffle=False, random_state=0))
    pred1 = ovr1.fit(X, y).predict(X)
    assert np.mean(pred == y) == np.mean(pred1 == y)

    # test partial_fit only exists if estimator has it:
    ovr = OneVsRestClassifier(SVC())
    assert not hasattr(ovr, "partial_fit")


def test_ovr_partial_fit_exceptions():
    ovr = OneVsRestClassifier(MultinomialNB())
    X = np.abs(np.random.randn(14, 2))
    y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
    ovr.partial_fit(X[:7], y[:7], np.unique(y))
    # A new class value which was not in the first call of partial_fit;
    # it should raise a ValueError
    y1 = [5] + y[7:-1]
    assert_raises_regexp(ValueError, r"Mini-batch contains \[.+\] while "
                                     r"classes must be subset of \[.+\]",
                         ovr.partial_fit, X=X[7:], y=y1)


def test_ovr_ovo_regressor():
    # test that ovr and ovo work on regressors which don't have a decision_
    # function
    ovr = OneVsRestClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert len(ovr.estimators_) == n_classes
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert np.mean(pred == iris.target) > .9

    ovr = OneVsOneClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert np.mean(pred == iris.target) > .9


def test_ovr_fit_predict_sparse():
    for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
                   sp.lil_matrix]:
        base_clf = MultinomialNB(alpha=1)

        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=3,
                                                       length=50,
                                                       allow_unlabeled=True,
                                                       random_state=0)
        X_train, Y_train = X[:80], Y[:80]
        X_test = X[80:]
        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        Y_pred = clf.predict(X_test)

        clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
        Y_pred_sprs = clf_sprs.predict(X_test)

        assert clf.multilabel_
        assert sp.issparse(Y_pred_sprs)
        assert_array_equal(Y_pred_sprs.toarray(), Y_pred)

        # Test predict_proba
        Y_proba = clf_sprs.predict_proba(X_test)

        # predict assigns a label if the probability that the
        # sample has the label is greater than 0.5.
        pred = Y_proba > .5
        assert_array_equal(pred, Y_pred_sprs.toarray())

        # Test decision_function
        clf = svm.SVC()
        clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train))
        dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
        assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())


def test_ovr_always_present():
    # Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
    X = np.ones((10, 2))
    X[:5, :] = 0

    # Build an indicator matrix where two features are always on.
    # As a list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
    y = np.zeros((10, 3))
    y[5:, 0] = 1
    y[:, 1] = 1
    y[:, 2] = 1

    ovr = OneVsRestClassifier(LogisticRegression())
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict(X)
    assert_array_equal(np.array(y_pred), np.array(y))
    y_pred = ovr.decision_function(X)
    assert np.unique(y_pred[:, -2:]) == 1
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))

    # y has a constantly absent label
    y = np.zeros((10, 2))
    y[5:, 0] = 1  # variable label
    ovr = OneVsRestClassifier(LogisticRegression())
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))


def test_ovr_multiclass():
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
    y = ["eggs", "spam", "ham", "eggs", "ham"]
    Y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]])

    classes = set("ham eggs spam".split())

    for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
                     LinearRegression(), Ridge(), ElasticNet()):
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        assert set(clf.classes_) == classes
        y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
        assert_array_equal(y_pred, ["eggs"])

        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[0, 0, 4]])[0]
        assert_array_equal(y_pred, [0, 0, 1])


def test_ovr_binary():
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
    y = ["eggs", "spam", "spam", "eggs", "spam"]
    Y = np.array([[0, 1, 1, 0, 1]]).T

    classes = set("eggs spam".split())

    def conduct_test(base_clf, test_predict_proba=False):
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        assert set(clf.classes_) == classes
        y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
        assert_array_equal(y_pred, ["eggs"])
        if hasattr(base_clf, 'decision_function'):
            dec = clf.decision_function(X)
            assert dec.shape == (5,)

        if test_predict_proba:
            X_test = np.array([[0, 0, 4]])
            probabilities = clf.predict_proba(X_test)
            assert 2 == len(probabilities[0])
            assert (clf.classes_[np.argmax(probabilities, axis=1)] ==
                    clf.predict(X_test))

        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[3, 0, 0]])[0]
        assert y_pred == 1

    for base_clf in (LinearSVC(random_state=0), LinearRegression(),
                     Ridge(), ElasticNet()):
        conduct_test(base_clf)

    for base_clf in (MultinomialNB(), SVC(probability=True),
                     LogisticRegression()):
        conduct_test(base_clf, test_predict_proba=True)


def test_ovr_multilabel():
    # Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]]) y = np.array([[0, 1, 1], [0, 1, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0]]) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), Lasso(alpha=0.5)): clf = OneVsRestClassifier(base_clf).fit(X, y) y_pred = clf.predict([[0, 4, 4]])[0] assert_array_equal(y_pred, [0, 1, 1]) assert clf.multilabel_ def test_ovr_fit_predict_svc(): ovr = OneVsRestClassifier(svm.SVC()) ovr.fit(iris.data, iris.target) assert len(ovr.estimators_) == 3 assert ovr.score(iris.data, iris.target) > .9 def test_ovr_multilabel_dataset(): base_clf = MultinomialNB(alpha=1) for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=au, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) assert clf.multilabel_ assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"), prec, decimal=2) assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"), recall, decimal=2) def test_ovr_multilabel_predict_proba(): base_clf = MultinomialNB(alpha=1) for au in (False, True): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=au, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') # Estimator with predict_proba disabled, depending on parameters. decision_only = OneVsRestClassifier(svm.SVC(probability=False)) assert not hasattr(decision_only, 'predict_proba') decision_only.fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') assert hasattr(decision_only, 'decision_function') # Estimator which can get predict_proba enabled after fitting gs = GridSearchCV(svm.SVC(probability=False), param_grid={'probability': [True]}) proba_after_fit = OneVsRestClassifier(gs) assert not hasattr(proba_after_fit, 'predict_proba') proba_after_fit.fit(X_train, Y_train) assert hasattr(proba_after_fit, 'predict_proba') Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred) def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. 
pred = np.array([l.argmax() for l in Y_proba]) assert not (pred - Y_pred).any() def test_ovr_multilabel_decision_function(): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal((clf.decision_function(X_test) > 0).astype(int), clf.predict(X_test)) def test_ovr_single_label_decision_function(): X, Y = datasets.make_classification(n_samples=100, n_features=20, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal(clf.decision_function(X_test).ravel() > 0, clf.predict(X_test)) def test_ovr_gridsearch(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovr, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ovr_pipeline(): # Test with pipeline of length one # This test is needed because the multiclass estimators may fail to detect # the presence of predict_proba or decision_function. clf = Pipeline([("tree", DecisionTreeClassifier())]) ovr_pipe = OneVsRestClassifier(clf) ovr_pipe.fit(iris.data, iris.target) ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data)) def test_ovr_coef_(): for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]: # SVC has sparse coef with sparse input data ovr = OneVsRestClassifier(base_classifier) for X in [iris.data, sp.csr_matrix(iris.data)]: # test with dense and sparse coef ovr.fit(X, iris.target) shape = ovr.coef_.shape assert shape[0] == n_classes assert shape[1] == iris.data.shape[1] # don't densify sparse coefficients assert (sp.issparse(ovr.estimators_[0].coef_) == sp.issparse(ovr.coef_)) def test_ovr_coef_exceptions(): # Not fitted exception! ovr = OneVsRestClassifier(LinearSVC(random_state=0)) # lambda is needed because we don't want coef_ to be evaluated right away assert_raises(ValueError, lambda x: ovr.coef_, None) # Doesn't have coef_ exception! ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_raises(AttributeError, lambda x: ovr.coef_, None) def test_ovo_exceptions(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovo.predict, []) def test_ovo_fit_on_list(): # Test that OneVsOne fitting works with a list of targets and yields the # same output as predict from an array ovo = OneVsOneClassifier(LinearSVC(random_state=0)) prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data) iris_data_list = [list(a) for a in iris.data] prediction_from_list = ovo.fit(iris_data_list, list(iris.target)).predict(iris_data_list) assert_array_equal(prediction_from_array, prediction_from_list) def test_ovo_fit_predict(): # A classifier which implements decision_function. ovo = OneVsOneClassifier(LinearSVC(random_state=0)) ovo.fit(iris.data, iris.target).predict(iris.data) assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2 # A classifier which implements predict_proba. 
    ovo = OneVsOneClassifier(MultinomialNB())
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2


def test_ovo_partial_fit_predict():
    temp = datasets.load_iris()
    X, y = temp.data, temp.target
    ovo1 = OneVsOneClassifier(MultinomialNB())
    ovo1.partial_fit(X[:100], y[:100], np.unique(y))
    ovo1.partial_fit(X[100:], y[100:])
    pred1 = ovo1.predict(X)

    ovo2 = OneVsOneClassifier(MultinomialNB())
    ovo2.fit(X, y)
    pred2 = ovo2.predict(X)
    assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2
    assert np.mean(y == pred1) > 0.65
    assert_almost_equal(pred1, pred2)

    # Test when mini-batches have binary target classes
    ovo1 = OneVsOneClassifier(MultinomialNB())
    ovo1.partial_fit(X[:60], y[:60], np.unique(y))
    ovo1.partial_fit(X[60:], y[60:])
    pred1 = ovo1.predict(X)
    ovo2 = OneVsOneClassifier(MultinomialNB())
    pred2 = ovo2.fit(X, y).predict(X)

    assert_almost_equal(pred1, pred2)
    assert len(ovo1.estimators_) == len(np.unique(y))
    assert np.mean(y == pred1) > 0.65

    ovo = OneVsOneClassifier(MultinomialNB())
    X = np.random.rand(14, 2)
    y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2]
    ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4])
    ovo.partial_fit(X[7:], y[7:])
    pred = ovo.predict(X)
    ovo2 = OneVsOneClassifier(MultinomialNB())
    pred2 = ovo2.fit(X, y).predict(X)
    assert_almost_equal(pred, pred2)

    # raises error when mini-batch does not have classes from all_classes
    ovo = OneVsOneClassifier(MultinomialNB())
    error_y = [0, 1, 2, 3, 4, 5, 2]
    message_re = escape("Mini-batch contains {0} while "
                        "it must be subset of {1}".format(np.unique(error_y),
                                                          np.unique(y)))
    assert_raises_regexp(ValueError, message_re, ovo.partial_fit, X[:7],
                         error_y, np.unique(y))

    # test partial_fit only exists if estimator has it:
    ovr = OneVsOneClassifier(SVC())
    assert not hasattr(ovr, "partial_fit")


def test_ovo_decision_function():
    n_samples = iris.data.shape[0]

    ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
    # first binary
    ovo_clf.fit(iris.data, iris.target == 0)
    decisions = ovo_clf.decision_function(iris.data)
    assert decisions.shape == (n_samples,)

    # then multi-class
    ovo_clf.fit(iris.data, iris.target)
    decisions = ovo_clf.decision_function(iris.data)

    assert decisions.shape == (n_samples, n_classes)
    assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))

    # Compute the votes
    votes = np.zeros((n_samples, n_classes))

    k = 0
    for i in range(n_classes):
        for j in range(i + 1, n_classes):
            pred = ovo_clf.estimators_[k].predict(iris.data)
            votes[pred == 0, i] += 1
            votes[pred == 1, j] += 1
            k += 1

    # Extract votes and verify
    assert_array_equal(votes, np.round(decisions))

    for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels, because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
        # Therefore, sorting predictions based on votes would yield
        # mostly tied predictions:
        assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))

        # The OvO decision function, on the other hand, is able to resolve
        # most of the ties on this data as it combines both the vote counts
        # and the aggregated confidence levels of the binary classifiers
        # to compute the aggregate decision function. The iris dataset
        # has 150 samples with a couple of duplicates. The OvO decisions
        # can resolve most of the ties:
        assert len(np.unique(decisions[:, class_idx])) > 146


def test_ovo_gridsearch():
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    Cs = [0.1, 0.5, 0.8]
    cv = GridSearchCV(ovo, {'estimator__C': Cs})
    cv.fit(iris.data, iris.target)
    best_C = cv.best_estimator_.estimators_[0].C
    assert best_C in Cs


def test_ovo_ties():
    # Test that ties are broken using the decision function,
    # not defaulting to the smallest label
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y = np.array([2, 0, 1, 2])
    multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
                                              tol=None))
    ovo_prediction = multi_clf.fit(X, y).predict(X)
    ovo_decision = multi_clf.decision_function(X)

    # Classifiers are in order 0-1, 0-2, 1-2
    # Use decision_function to compute the votes and the normalized
    # sum_of_confidences, which is used to disambiguate when there is a tie
    # in votes.
    votes = np.round(ovo_decision)
    normalized_confidences = ovo_decision - votes

    # For the first point, there is one vote per class
    assert_array_equal(votes[0, :], 1)
    # For the rest, there is no tie and the prediction is the argmax
    assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
    # For the tie, the prediction is the class with the highest score
    assert ovo_prediction[0] == normalized_confidences[0].argmax()


def test_ovo_ties2():
    # test that ties can not only be won by the first two labels
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y_ref = np.array([2, 0, 1, 2])

    # cycle through labels so that each label wins once
    for i in range(3):
        y = (y_ref + i) % 3
        multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
                                                  tol=None))
        ovo_prediction = multi_clf.fit(X, y).predict(X)
        assert ovo_prediction[0] == i % 3


def test_ovo_string_y():
    # Test that the OvO doesn't mess up the encoding of string labels
    X = np.eye(4)
    y = np.array(['a', 'b', 'c', 'd'])

    ovo = OneVsOneClassifier(LinearSVC())
    ovo.fit(X, y)
    assert_array_equal(y, ovo.predict(X))


def test_ovo_one_class():
    # Test error for OvO with one class
    X = np.eye(4)
    y = np.array(['a'] * 4)

    ovo = OneVsOneClassifier(LinearSVC())
    assert_raise_message(ValueError, "when only one class", ovo.fit, X, y)


def test_ovo_float_y():
    # Test that the OvO errors on float targets
    X = iris.data
    y = iris.data[:, 0]

    ovo = OneVsOneClassifier(LinearSVC())
    assert_raise_message(ValueError, "Unknown label type", ovo.fit, X, y)


def test_ecoc_exceptions():
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ecoc.predict, [])


def test_ecoc_fit_predict():
    # A classifier which implements decision_function.
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
                                code_size=2, random_state=0)
    ecoc.fit(iris.data, iris.target).predict(iris.data)
    assert len(ecoc.estimators_) == n_classes * 2

    # A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert len(ecoc.estimators_) == n_classes * 2 def test_ecoc_gridsearch(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ecoc, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ecoc_float_y(): # Test that the OCC errors on float targets X = iris.data y = iris.data[:, 0] ovo = OutputCodeClassifier(LinearSVC()) assert_raise_message(ValueError, "Unknown label type", ovo.fit, X, y) ovo = OutputCodeClassifier(LinearSVC(), code_size=-1) assert_raise_message(ValueError, "code_size should be greater than 0," " got -1", ovo.fit, X, y) def test_ecoc_delegate_sparse_base_estimator(): # Non-regression test for # https://github.com/scikit-learn/scikit-learn/issues/17218 X, y = iris.data, iris.target X_sp = sp.csc_matrix(X) # create an estimator that does not support sparse input base_estimator = CheckingClassifier( check_X=check_array, check_X_params={"ensure_2d": True, "accept_sparse": False}, ) ecoc = OutputCodeClassifier(base_estimator, random_state=0) with pytest.raises(TypeError, match="A sparse matrix was passed"): ecoc.fit(X_sp, y) ecoc.fit(X, y) with pytest.raises(TypeError, match="A sparse matrix was passed"): ecoc.predict(X_sp) # smoke test to check when sparse input should be supported ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) ecoc.fit(X_sp, y).predict(X_sp) assert len(ecoc.estimators_) == 4 def test_pairwise_indices(): clf_precomputed = svm.SVC(kernel='precomputed') X, y = iris.data, iris.target ovr_false = OneVsOneClassifier(clf_precomputed) linear_kernel = np.dot(X, X.T) ovr_false.fit(linear_kernel, y) n_estimators = len(ovr_false.estimators_) precomputed_indices = ovr_false.pairwise_indices_ for idx in precomputed_indices: assert (idx.shape[0] * n_estimators / (n_estimators - 1) == linear_kernel.shape[0]) def test_pairwise_attribute(): clf_precomputed = svm.SVC(kernel='precomputed') clf_notprecomputed = svm.SVC() for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]: ovr_false = MultiClassClassifier(clf_notprecomputed) assert not ovr_false._pairwise ovr_true = MultiClassClassifier(clf_precomputed) assert ovr_true._pairwise def test_pairwise_cross_val_score(): clf_precomputed = svm.SVC(kernel='precomputed') clf_notprecomputed = svm.SVC(kernel='linear') X, y = iris.data, iris.target for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]: ovr_false = MultiClassClassifier(clf_notprecomputed) ovr_true = MultiClassClassifier(clf_precomputed) linear_kernel = np.dot(X, X.T) score_precomputed = cross_val_score(ovr_true, linear_kernel, y) score_linear = cross_val_score(ovr_false, X, y) assert_array_equal(score_precomputed, score_linear)
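

# --- Illustrative sketch (added; not one of the upstream test cases) ---
# The one-vs-rest reduction fits one binary classifier per class, while
# one-vs-one fits one per unordered class pair; the estimator counts
# asserted throughout the tests above follow from that.
def _demo_estimator_counts():
    ovr = OneVsRestClassifier(LogisticRegression(max_iter=200)).fit(iris.data,
                                                                    iris.target)
    ovo = OneVsOneClassifier(LinearSVC(random_state=0)).fit(iris.data,
                                                            iris.target)
    assert len(ovr.estimators_) == n_classes                         # one per class
    assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2   # one per pair
    return ovr.predict(iris.data[:5]), ovo.predict(iris.data[:5])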
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class HubVirtualNetworkConnectionsOperations(object): """HubVirtualNetworkConnectionsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2021_05_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def _create_or_update_initial( self, resource_group_name, # type: str virtual_hub_name, # type: str connection_name, # type: str hub_virtual_network_connection_parameters, # type: "_models.HubVirtualNetworkConnection" **kwargs # type: Any ): # type: (...) 
-> "_models.HubVirtualNetworkConnection" cls = kwargs.pop('cls', None) # type: ClsType["_models.HubVirtualNetworkConnection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(hub_virtual_network_connection_parameters, 'HubVirtualNetworkConnection') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore def begin_create_or_update( self, resource_group_name, # type: str virtual_hub_name, # type: str connection_name, # type: str hub_virtual_network_connection_parameters, # type: "_models.HubVirtualNetworkConnection" **kwargs # type: Any ): # type: (...) -> LROPoller["_models.HubVirtualNetworkConnection"] """Creates a hub virtual network connection if it doesn't exist else updates the existing one. :param resource_group_name: The resource group name of the HubVirtualNetworkConnection. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :param connection_name: The name of the HubVirtualNetworkConnection. :type connection_name: str :param hub_virtual_network_connection_parameters: Parameters supplied to create or update a hub virtual network connection. 
:type hub_virtual_network_connection_parameters: ~azure.mgmt.network.v2021_05_01.models.HubVirtualNetworkConnection :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either HubVirtualNetworkConnection or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_05_01.models.HubVirtualNetworkConnection] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.HubVirtualNetworkConnection"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, virtual_hub_name=virtual_hub_name, connection_name=connection_name, hub_virtual_network_connection_parameters=hub_virtual_network_connection_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore def _delete_initial( self, resource_group_name, # type: str virtual_hub_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-05-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str virtual_hub_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes a HubVirtualNetworkConnection. :param resource_group_name: The resource group name of the VirtualHub. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :param connection_name: The name of the HubVirtualNetworkConnection. :type connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, virtual_hub_name=virtual_hub_name, connection_name=connection_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore def get( self, resource_group_name, # type: str virtual_hub_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.HubVirtualNetworkConnection" """Retrieves the details of a HubVirtualNetworkConnection. :param resource_group_name: The resource group name of the VirtualHub. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. :type virtual_hub_name: str :param connection_name: The name of the vpn connection. 
:type connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: HubVirtualNetworkConnection, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_05_01.models.HubVirtualNetworkConnection :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.HubVirtualNetworkConnection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-05-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore def list( self, resource_group_name, # type: str virtual_hub_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.ListHubVirtualNetworkConnectionsResult"] """Retrieves the details of all HubVirtualNetworkConnections. :param resource_group_name: The resource group name of the VirtualHub. :type resource_group_name: str :param virtual_hub_name: The name of the VirtualHub. 
:type virtual_hub_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ListHubVirtualNetworkConnectionsResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.ListHubVirtualNetworkConnectionsResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHubVirtualNetworkConnectionsResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('ListHubVirtualNetworkConnectionsResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections'} # type: ignore
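# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the generated operations class
# above. It shows how get(), list() and the long-running begin_delete()
# are typically driven through the generated client. The client/credential
# classes named here (NetworkManagementClient, DefaultAzureCredential) are
# assumptions based on the usual azure-mgmt-network / azure-identity
# packaging, and all resource names are placeholders.
def _example_hub_vnet_connection_usage():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(),
                                     '<subscription-id>')
    ops = client.hub_virtual_network_connections

    # get() issues a single GET and deserializes the body into a
    # HubVirtualNetworkConnection model.
    connection = ops.get('my-rg', 'my-hub', 'my-connection')
    print(connection.name)

    # list() returns an ItemPaged; iterating it follows next_link
    # transparently, one page per request.
    for conn in ops.list('my-rg', 'my-hub'):
        print(conn.name)

    # begin_delete() returns an LROPoller; result() blocks until the
    # location-based polling ('final-state-via': 'location') completes.
    ops.begin_delete('my-rg', 'my-hub', 'my-connection').result()
# ---------------------------------------------------------------------------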
# Copyright (c) 2013 Huawei Technologies Co., Ltd. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for Huawei T and Dorado volume drivers. """ import mox import os import shutil import socket import tempfile import time from xml.dom.minidom import Document from xml.etree import ElementTree as ET from cinder import context from cinder import exception from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import HuaweiVolumeDriver from cinder.volume.drivers.huawei import ssh_common from cinder.volume import volume_types LUN_INFO = {'ID': None, 'Name': None, 'Size': None, 'LUN WWN': None, 'Status': None, 'Visible Capacity': None, 'Disk Pool ID': None, 'Cache Prefetch Strategy': None, 'Lun Type': None, 'Consumed Capacity': None, 'Pool ID': None, 'SnapShot ID': None, 'LunCopy ID': None, 'Owner Controller': None, 'Worker Controller': None, 'RAID Group ID': None} CLONED_LUN_INFO = {'ID': None, 'Name': None, 'Size': None, 'LUN WWN': None, 'Status': None, 'Visible Capacity': None, 'Disk Pool ID': None, 'Cache Prefetch Strategy': None, 'Lun Type': None, 'Consumed Capacity': None, 'Pool ID': None, 'SnapShot ID': None, 'LunCopy ID': None, 'Owner Controller': None, 'Worker Controller': None, 'RAID Group ID': None} SNAPSHOT_INFO = {'Source LUN ID': None, 'Source LUN Name': None, 'ID': None, 'Name': None, 'Type': 'Public', 'Status': None} MAP_INFO = {'Host Group ID': None, 'Host Group Name': None, 'Host ID': None, 'Host Name': None, 'Os Type': None, 'INI Port ID': None, 'INI Port Name': None, 'INI Port Info': None, 'INI Port WWN': None, 'INI Port Type': None, 'Link Status': None, 'LUN WWN': None, 'DEV LUN ID': None, 'Host LUN ID': None, 'CHAP status': False} HOST_PORT_INFO = {'ID': None, 'Name': None, 'Info': None, 'WWN': None, 'Type': None} LUNCOPY_INFO = {'Name': None, 'ID': None, 'Type': None, 'State': None, 'Status': None} LUNCOPY_SETTING = {'ID': '1', 'Type': 'FULL', 'State': 'Created', 'Status': 'Normal'} POOL_SETTING = {'ID': '2', 'Level': 'RAID6', 'Status': 'Normal', 'Free Capacity': '10240', 'Disk List': '0,1;0,2;0,3;0,4;0,5;0,6', 'Name': 'RAID_001', 'Type': 'Thick'} INITIATOR_SETTING = {'TargetIQN': 'iqn.2006-08.com.huawei:oceanspace:2103037:', 'TargetIQN-form': 'iqn.2006-08.com.huawei:oceanspace:' '2103037::1020001:192.168.100.2', 'Initiator Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'Initiator TargetIP': '192.168.100.2', 'WWN': ['2011666666666565']} FAKE_VOLUME = {'name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe', 'id': 'lele34fe-223f-dd33-4423-asdfghjklqwe', 'size': '2', 'provider_auth': None, 'volume_type_id': None, 'provider_location': None} FAKE_CLONED_VOLUME = {'name': 'Volume-jeje34fe-223f-dd33-4423-asdfghjklqwg', 'id': 'jeje34fe-223f-dd33-4423-asdfghjklqwg', 'size': '3', 'provider_auth': None, 'volume_type_id': None, 'provider_location': None} FAKE_SNAPSHOT = {'name': 
'keke34fe-223f-dd33-4423-asdfghjklqwf', 'id': '223f-dd33-4423-asdfghjklqwf', 'volume_name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe', 'provider_location': None} FAKE_CONNECTOR = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'wwpns': ['1000000164s45126'], 'wwnns': ['2000666666666565'], 'host': 'fakehost', 'ip': '10.10.0.1'} RESPOOL_A_SIM = {'Size': '10240', 'Valid Size': '5120'} RESPOOL_B_SIM = {'Size': '10240', 'Valid Size': '10240'} VOLUME_SNAP_ID = {'vol': '0', 'vol_copy': '1', 'snap': '2'} cmd_error_list = [] # CLI cmds in this list will run failed Curr_test = [''] # show current testing driver class FakeChannel(): def __init__(self): if Curr_test[0] == 'T': self.simu = HuaweiTCLIResSimulator() elif Curr_test[0] == 'Dorado5100': self.simu = HuaweiDorado5100CLIResSimulator() else: self.simu = HuaweiDorado2100G2CLIResSimulator() def resize_pty(self, width=80, height=24): pass def settimeout(self, time): pass def send(self, s): self.command = s def recv(self, nbytes): command = self.command.split() cmd = command[0] params = command[1:] if cmd in cmd_error_list: reset_error_flg(cmd) out = self.command[:-1] + 'ERROR' + '\nadmin:/>' return out.replace('\n', '\r\n') func_name = 'cli_' + cmd cli_func = getattr(self.simu, func_name) out = cli_func(params) out = self.command[:-1] + out + '\nadmin:/>' return out.replace('\n', '\r\n') def close(self): pass class FakeSSHClient(): def invoke_shell(self): return FakeChannel() def get_transport(self): class transport(): def __init__(self): self.sock = sock() class sock(): def settimeout(self, time): pass return transport() def close(self): pass class FakeSSHPool(): def __init__(self, ip, port, conn_timeout, login, password=None, *args, **kwargs): self.ip = ip self.port = port self.login = login self.password = password def create(self): return FakeSSHClient() def get(self): return FakeSSHClient() def put(self, ssh): pass def remove(self, ssh): pass def Fake_sleep(time): pass def Fake_change_file_mode(obj, filepath): pass def create_fake_conf_file(filename): doc = Document() config = doc.createElement('config') doc.appendChild(config) storage = doc.createElement('Storage') config.appendChild(storage) product = doc.createElement('Product') product_text = doc.createTextNode('T') product.appendChild(product_text) storage.appendChild(product) config.appendChild(storage) protocol = doc.createElement('Protocol') protocol_text = doc.createTextNode('iSCSI') protocol.appendChild(protocol_text) storage.appendChild(protocol) controllerip0 = doc.createElement('ControllerIP0') controllerip0_text = doc.createTextNode('10.10.10.1') controllerip0.appendChild(controllerip0_text) storage.appendChild(controllerip0) controllerip1 = doc.createElement('ControllerIP1') controllerip1_text = doc.createTextNode('10.10.10.2') controllerip1.appendChild(controllerip1_text) storage.appendChild(controllerip1) username = doc.createElement('UserName') username_text = doc.createTextNode('admin') username.appendChild(username_text) storage.appendChild(username) userpassword = doc.createElement('UserPassword') userpassword_text = doc.createTextNode('123456') userpassword.appendChild(userpassword_text) storage.appendChild(userpassword) lun = doc.createElement('LUN') config.appendChild(lun) storagepool = doc.createElement('StoragePool') storagepool.setAttribute('Name', 'RAID_001') lun.appendChild(storagepool) luntype = doc.createElement('LUNType') luntype_text = doc.createTextNode('Thick') luntype.appendChild(luntype_text) lun.appendChild(luntype) iscsi = 
doc.createElement('iSCSI') config.appendChild(iscsi) defaulttargetip = doc.createElement('DefaultTargetIP') defaulttargetip_text = doc.createTextNode('192.168.100.1') defaulttargetip.appendChild(defaulttargetip_text) iscsi.appendChild(defaulttargetip) initiator = doc.createElement('Initiator') initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3') initiator.setAttribute('TargetIP', '192.168.100.2') iscsi.appendChild(initiator) os_type = doc.createElement('Host') os_type.setAttribute('OSType', 'Linux') os_type.setAttribute('HostIP', '10.10.0.1') config.appendChild(os_type) tmp_file = open(filename, 'w') tmp_file.write(doc.toprettyxml(indent='')) tmp_file.close() def modify_conf(conf, item, val, attrib=None): tree = ET.parse(conf) root = tree.getroot() conf_item = root.find('%s' % item) if not attrib: conf_item.text = '%s' % val else: conf_item.attrib['%s' % attrib] = '%s' % val tree.write(conf, 'UTF-8') def set_error_flg(cmd): cmd_error_list.append(cmd) def reset_error_flg(cmd): cmd_error_list.remove(cmd) class HuaweiTCLIResSimulator(): def _paras_name(self, params): index = params.index('-n') return params[index + 1] def cli_showsys(self, params): pass def cli_createlun(self, params): lun_type = ('THIN' if '-pool' in params else 'THICK') if LUN_INFO['ID'] is None: LUN_INFO['Name'] = self._paras_name(params) LUN_INFO['ID'] = VOLUME_SNAP_ID['vol'] LUN_INFO['Size'] = FAKE_VOLUME['size'] LUN_INFO['Lun Type'] = lun_type LUN_INFO['Owner Controller'] = 'A' LUN_INFO['Worker Controller'] = 'A' LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] FAKE_VOLUME['provider_location'] = LUN_INFO['ID'] else: CLONED_LUN_INFO['Name'] = self._paras_name(params) CLONED_LUN_INFO['ID'] = VOLUME_SNAP_ID['vol_copy'] CLONED_LUN_INFO['Size'] = FAKE_CLONED_VOLUME['size'] CLONED_LUN_INFO['Lun Type'] = lun_type CLONED_LUN_INFO['Owner Controller'] = 'A' CLONED_LUN_INFO['Worker Controller'] = 'A' CLONED_LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] FAKE_CLONED_VOLUME['provider_location'] = CLONED_LUN_INFO['ID'] out = 'command operates successfully' return out def cli_showlun(self, params): if '-lun' not in params: if LUN_INFO['ID'] is None: out = 'command operates successfully, but no information.' 
elif CLONED_LUN_INFO['ID'] is None: msg = """/>showlun =========================================================================== LUN Information --------------------------------------------------------------------------- ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB) \ LUN Name Stripe Unit Size(KB) Lun Type --------------------------------------------------------------------------- %s %s -- Normal %s %s %s 64 THICK =========================================================================== """ out = msg % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name']) else: msg = """/>showlun ============================================================================ LUN Information ---------------------------------------------------------------------------- ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB)\ LUN Name Stripe Unit Size(KB) Lun Type ---------------------------------------------------------------------------- %s %s -- Normal %s %s %s 64 THICK %s %s -- Normal %s %s %s 64 THICK ============================================================================ """ out = msg % ( LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'], CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'], CLONED_LUN_INFO['Owner Controller'], str(int(CLONED_LUN_INFO['Size']) * 1024), CLONED_LUN_INFO['Name']) elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values(): msg = """/>showlun ================================================ LUN Information ------------------------------------------------ ID | %s Name | %s LUN WWN | -- Visible Capacity | %s RAID GROUP ID | %s Owning Controller | %s Workong Controller | %s Lun Type | %s SnapShot ID | %s LunCopy ID | %s ================================================ """ out = msg % ( (LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'], LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID']) if (params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']) else (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'], CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'], CLONED_LUN_INFO['Owner Controller'], CLONED_LUN_INFO['Worker Controller'], CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'], CLONED_LUN_INFO['LunCopy ID'])) else: out = 'ERROR: The object does not exist.' 
return out def cli_dellun(self, params): if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']: LUN_INFO['Name'] = None LUN_INFO['ID'] = None LUN_INFO['Size'] = None LUN_INFO['Lun Type'] = None LUN_INFO['LUN WWN'] = None LUN_INFO['Owner Controller'] = None LUN_INFO['Worker Controller'] = None LUN_INFO['RAID Group ID'] = None FAKE_VOLUME['provider_location'] = None else: CLONED_LUN_INFO['Name'] = None CLONED_LUN_INFO['ID'] = None CLONED_LUN_INFO['Size'] = None CLONED_LUN_INFO['Lun Type'] = None CLONED_LUN_INFO['LUN WWN'] = None CLONED_LUN_INFO['Owner Controller'] = None CLONED_LUN_INFO['Worker Controller'] = None CLONED_LUN_INFO['RAID Group ID'] = None CLONED_LUN_INFO['provider_location'] = None FAKE_CLONED_VOLUME['provider_location'] = None out = 'command operates successfully' return out def cli_showrg(self, params): msg = """/>showrg ===================================================================== RAID Group Information --------------------------------------------------------------------- ID Level Status Free Capacity(MB) Disk List Name --------------------------------------------------------------------- 0 RAID6 Normal 1024 0,0;0,2; RAID003 %s %s %s %s %s %s ===================================================================== -""" out = msg % (POOL_SETTING['ID'], POOL_SETTING['Level'], POOL_SETTING['Status'], POOL_SETTING['Free Capacity'], POOL_SETTING['Disk List'], POOL_SETTING['Name']) return out def cli_showpool(self, params): out = """/>showpool ===================================================================== Pool Information --------------------------------------------------------------------- Level Status Available Capacity(MB) Disk List --------------------------------------------------------------------- RAID6 Normal %s 0,0;0,2;0,4;0,5; ===================================================================== -""" % POOL_SETTING['Free Capacity'] return out def cli_createluncopy(self, params): src_id = params[params.index('-slun') + 1] tgt_id = params[params.index('-tlun') + 1] LUNCOPY_INFO['Name'] = 'OpenStack_%s_%s' % (src_id, tgt_id) LUNCOPY_INFO['ID'] = LUNCOPY_SETTING['ID'] LUNCOPY_INFO['Type'] = LUNCOPY_SETTING['Type'] LUNCOPY_INFO['State'] = LUNCOPY_SETTING['State'] LUNCOPY_INFO['Status'] = LUNCOPY_SETTING['Status'] out = 'command operates successfully' return out def cli_chgluncopystatus(self, params): LUNCOPY_INFO['State'] = 'Start' out = 'command operates successfully' return out def cli_showluncopy(self, params): if LUNCOPY_INFO['State'] == 'Start': LUNCOPY_INFO['State'] = 'Copying' elif LUNCOPY_INFO['State'] == 'Copying': LUNCOPY_INFO['State'] = 'Complete' msg = """/>showluncopy ============================================================================ LUN Copy Information ---------------------------------------------------------------------------- LUN Copy Name LUN Copy ID Type LUN Copy State LUN Copy Status ---------------------------------------------------------------------------- %s %s %s %s %s ============================================================================ """ out = msg % (LUNCOPY_INFO['Name'], LUNCOPY_INFO['ID'], LUNCOPY_INFO['Type'], LUNCOPY_INFO['State'], LUNCOPY_INFO['Status']) return out def cli_delluncopy(self, params): LUNCOPY_INFO['Name'] = None LUNCOPY_INFO['ID'] = None LUNCOPY_INFO['Type'] = None LUNCOPY_INFO['State'] = None LUNCOPY_INFO['Status'] = None out = 'command operates successfully' return out def cli_createsnapshot(self, params): SNAPSHOT_INFO['Source LUN ID'] = LUN_INFO['ID'] SNAPSHOT_INFO['Source LUN Name'] = 
LUN_INFO['Name'] SNAPSHOT_INFO['ID'] = VOLUME_SNAP_ID['snap'] SNAPSHOT_INFO['Name'] = self._paras_name(params) SNAPSHOT_INFO['Status'] = 'Disable' out = 'command operates successfully' return out def cli_showsnapshot(self, params): if SNAPSHOT_INFO['ID'] is None: out = 'command operates successfully, but no information.' else: out = """/>showsnapshot ========================================================================== Snapshot Information -------------------------------------------------------------------------- Name ID Type Status Time Stamp -------------------------------------------------------------------------- %s %s Public %s 2013-01-15 14:21:13 ========================================================================== """ % (SNAPSHOT_INFO['Name'], SNAPSHOT_INFO['ID'], SNAPSHOT_INFO['Status']) return out def cli_actvsnapshot(self, params): SNAPSHOT_INFO['Status'] = 'Active' FAKE_SNAPSHOT['provider_location'] = SNAPSHOT_INFO['ID'] out = 'command operates successfully' return out def cli_disablesnapshot(self, params): SNAPSHOT_INFO['Status'] = 'Disable' out = 'command operates successfully' return out def cli_delsnapshot(self, params): SNAPSHOT_INFO['Source LUN ID'] = None SNAPSHOT_INFO['Source LUN Name'] = None SNAPSHOT_INFO['ID'] = None SNAPSHOT_INFO['Name'] = None SNAPSHOT_INFO['Status'] = None FAKE_SNAPSHOT['provider_location'] = None out = 'command operates successfully' return out def cli_showrespool(self, params): msg = """/>showrespool =========================================================================== Resource Pool Information --------------------------------------------------------------------------- Pool ID Size(MB) Usage(MB) Valid Size(MB) Alarm Threshold --------------------------------------------------------------------------- A %s 0.0 %s 80 B %s 0.0 %s 80 =========================================================================== -""" out = msg % (RESPOOL_A_SIM['Size'], RESPOOL_A_SIM['Valid Size'], RESPOOL_B_SIM['Size'], RESPOOL_B_SIM['Valid Size']) return out def cli_showiscsitgtname(self, params): iqn = INITIATOR_SETTING['TargetIQN'] out = """/>showiscsitgtname =================================================================== ISCSI Name ------------------------------------------------------------------- Iscsi Name | %s =================================================================== """ % iqn return out def cli_showiscsiip(self, params): out = """/>showiscsiip ============================================================================ iSCSI IP Information ---------------------------------------------------------------------------- Controller ID Interface Module ID Port ID IP Address Mask ---------------------------------------------------------------------------- B 0 P1 %s 255.255.255.0 ============================================================================ -""" % INITIATOR_SETTING['Initiator TargetIP'] return out def cli_showhostgroup(self, params): if MAP_INFO['Host Group ID'] is None: out = """/>showhostgroup ============================================================ Host Group Information ------------------------------------------------------------ Host Group ID Name File Engine Cluster ------------------------------------------------------------ 0 Default Group NO ============================================================ """ else: out = """/>showhostgroup ============================================================ Host Group Information ------------------------------------------------------------ Host Group ID Name File Engine 
Cluster ------------------------------------------------------------ 0 Default Group NO %s %s NO ============================================================ """ % (MAP_INFO['Host Group ID'], MAP_INFO['Host Group Name']) return out def cli_createhostgroup(self, params): MAP_INFO['Host Group ID'] = '1' MAP_INFO['Host Group Name'] = 'HostGroup_OpenStack' out = 'command operates successfully' return out def cli_showhost(self, params): if MAP_INFO['Host ID'] is None: out = 'command operates successfully, but no information.' else: out = """/>showhost ======================================================= Host Information ------------------------------------------------------- Host ID Host Name Host Group ID Os Type ------------------------------------------------------- %s %s %s Linux ======================================================= """ % (MAP_INFO['Host ID'], MAP_INFO['Host Name'], MAP_INFO['Host Group ID']) return out def cli_addhost(self, params): MAP_INFO['Host ID'] = '1' MAP_INFO['Host Name'] = 'Host_' + FAKE_CONNECTOR['host'] MAP_INFO['Os Type'] = 'Linux' out = 'command operates successfully' return out def cli_delhost(self, params): MAP_INFO['Host ID'] = None MAP_INFO['Host Name'] = None MAP_INFO['Os Type'] = None out = 'command operates successfully' return out def cli_showiscsiini(self, params): if HOST_PORT_INFO['ID'] is None: out = 'Error: The parameter is wrong.' else: out = """/>showiscsiini ======================================================== Initiator Information -------------------------------------------------------- Initiator Name Chap Status -------------------------------------------------------- %s Disable ======================================================== """ % HOST_PORT_INFO['Info'] return out def cli_addiscsiini(self, params): HOST_PORT_INFO['ID'] = '1' HOST_PORT_INFO['Name'] = 'iSCSIInitiator001' HOST_PORT_INFO['Info'] = INITIATOR_SETTING['Initiator Name'] HOST_PORT_INFO['Type'] = 'ISCSITGT' out = 'command operates successfully' return out def cli_deliscsiini(self, params): HOST_PORT_INFO['ID'] = None HOST_PORT_INFO['Name'] = None HOST_PORT_INFO['Info'] = None HOST_PORT_INFO['Type'] = None out = 'command operates successfully' return out def cli_showhostport(self, params): if MAP_INFO['INI Port ID'] is None: out = 'command operates successfully, but no information.' 
else: msg = """/>showhostport ============================================================================ Host Port Information ---------------------------------------------------------------------------- Port ID Port Name Port Information Port Type Host ID Link Status \ Multipath Type ---------------------------------------------------------------------------- %s %s %s %s %s Unconnected Default ============================================================================ """ out = msg % (MAP_INFO['INI Port ID'], MAP_INFO['INI Port Name'], MAP_INFO['INI Port Info'], MAP_INFO['INI Port Type'], MAP_INFO['Host ID']) return out def cli_addhostport(self, params): MAP_INFO['INI Port ID'] = HOST_PORT_INFO['ID'] MAP_INFO['INI Port Name'] = HOST_PORT_INFO['Name'] MAP_INFO['INI Port Info'] = HOST_PORT_INFO['Info'] MAP_INFO['INI Port Type'] = HOST_PORT_INFO['Type'] out = 'command operates successfully' return out def cli_delhostport(self, params): MAP_INFO['INI Port ID'] = None MAP_INFO['INI Port Name'] = None MAP_INFO['INI Port Info'] = None MAP_INFO['INI Port Type'] = None HOST_PORT_INFO['ID'] = None HOST_PORT_INFO['Name'] = None HOST_PORT_INFO['Info'] = None HOST_PORT_INFO['Type'] = None out = 'command operates successfully' return out def cli_showhostmap(self, params): if MAP_INFO['DEV LUN ID'] is None: out = 'command operates successfully, but no information.' else: msg = """/>showhostmap =========================================================================== Map Information --------------------------------------------------------------------------- Map ID Working Controller Dev LUN ID LUN WWN Host LUN ID Mapped to\ RAID ID Dev LUN Cap(MB) Map Type Whether Command LUN Pool ID ---------------------------------------------------------------------------- 2147483649 %s %s %s %s Host: %s %s %s HOST No -- ============================================================================ """ out = msg % (LUN_INFO['Worker Controller'], LUN_INFO['ID'], LUN_INFO['LUN WWN'], MAP_INFO['Host LUN ID'], MAP_INFO['Host ID'], LUN_INFO['RAID Group ID'], str(int(LUN_INFO['Size']) * 1024)) return out def cli_addhostmap(self, params): MAP_INFO['DEV LUN ID'] = LUN_INFO['ID'] MAP_INFO['LUN WWN'] = LUN_INFO['LUN WWN'] MAP_INFO['Host LUN ID'] = '2' MAP_INFO['Link Status'] = 'Linked' out = 'command operates successfully' return out def cli_delhostmap(self, params): if MAP_INFO['Link Status'] == 'Linked': MAP_INFO['Link Status'] = 'Deleting' out = 'there are IOs accessing the system, please try later' else: MAP_INFO['Link Status'] = None MAP_INFO['DEV LUN ID'] = None MAP_INFO['LUN WWN'] = None MAP_INFO['Host LUN ID'] = None out = 'command operates successfully' return out def cli_showfreeport(self, params): out = """/>showfreeport ======================================================================= Host Free Port Information ----------------------------------------------------------------------- WWN Or MAC Type Location Connection Status ----------------------------------------------------------------------- 1000000164s45126 FC Primary Controller Connected ======================================================================= """ HOST_PORT_INFO['ID'] = '2' HOST_PORT_INFO['Name'] = 'FCInitiator001' HOST_PORT_INFO['Info'] = '1000000164s45126' HOST_PORT_INFO['Type'] = 'FC' return out def cli_showhostpath(self, params): host = params[params.index('-host') + 1] out = """/>showhostpath -host 1 ======================================= Multi Path Information --------------------------------------- Host ID | %s Controller ID | B 
Port Type | FC
Initiator WWN | 1000000164s45126
Target WWN | %s
Host Port ID | 0
Link Status | Normal
=======================================
""" % (host, INITIATOR_SETTING['WWN'][0])
        return out

    def cli_showfcmode(self, params):
        out = """/>showfcport
=========================================================================
FC Port Topology Mode
-------------------------------------------------------------------------
Controller ID Interface Module ID Port ID WWN Current Mode
-------------------------------------------------------------------------
B 1 P0 %s --
=========================================================================
-""" % INITIATOR_SETTING['WWN'][0]
        return out

    def cli_chglun(self, params):
        if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']:
            LUN_INFO['Owner Controller'] = 'B'
        else:
            CLONED_LUN_INFO['Owner Controller'] = 'B'
        out = 'command operates successfully'
        return out

    def cli_addluntoextlun(self, params):
        LUN_INFO['Size'] = int(LUN_INFO['Size']) + int(CLONED_LUN_INFO['Size'])
        out = 'command operates successfully'
        return out

    def cli_rmlunfromextlun(self, params):
        LUN_INFO['Size'] = int(LUN_INFO['Size']) - int(CLONED_LUN_INFO['Size'])
        out = 'command operates successfully'
        return out


class HuaweiDorado5100CLIResSimulator(HuaweiTCLIResSimulator):
    def cli_showsys(self, params):
        out = """/>showsys
=============================================================
System Information
-------------------------------------------------------------
System Name | SN_Dorado5100
Device Type | Oceanstor Dorado5100
Current System Mode | Double Controllers Normal
Mirroring Link Status | Link Up
Location |
Time | 2013-01-01 01:01:01
Product Version | V100R001C00
=============================================================
"""
        return out

    def cli_showlun(self, params):
        if '-lun' not in params:
            if LUN_INFO['ID'] is None:
                out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None: msg = """/>showlun =========================================================================== LUN Information --------------------------------------------------------------------------- ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name..\ Strip Unit Size(KB) Lun Type --------------------------------------------------------------------------- %s %s Normal %s %s %s 64 THICK =========================================================================== """ out = msg % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name']) else: msg = """/>showlun =========================================================================== LUN Information --------------------------------------------------------------------------- ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name \ Strip Unit Size(KB) Lun Type --------------------------------------------------------------------------- %s %s Normal %s %s %s 64 THICK %s %s Norma %s %s %s 64 THICK =========================================================================== """ out = msg % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'], CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'], CLONED_LUN_INFO['Owner Controller'], str(int(CLONED_LUN_INFO['Size']) * 1024), CLONED_LUN_INFO['Name']) elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values(): msg = """/>showlun ================================================ LUN Information ------------------------------------------------ ID | %s Name | %s LUN WWN | -- Visible Capacity | %s RAID GROUP ID | %s Owning Controller | %s Workong Controller | %s Lun Type | %s SnapShot ID | %s LunCopy ID | %s ================================================ """ out = msg % ( (LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'], LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID']) if (params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']) else (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'], CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'], CLONED_LUN_INFO['Owner Controller'], CLONED_LUN_INFO['Worker Controller'], CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'], CLONED_LUN_INFO['LunCopy ID'])) else: out = 'ERROR: The object does not exist.' 
return out class HuaweiDorado2100G2CLIResSimulator(HuaweiTCLIResSimulator): def cli_showsys(self, params): out = """/>showsys ========================================================================== System Information -------------------------------------------------------------------------- System Name | SN_Dorado2100_G2 Device Type | Oceanstor Dorado2100 G2 Current System Mode | Double Controllers Normal Mirroring Link Status | Link Up Location | Time | 2013-01-01 01:01:01 Product Version | V100R001C00 =========================================================================== """ return out def cli_createlun(self, params): lun_type = ('THIN' if params[params.index('-type') + 1] == '2' else 'THICK') if LUN_INFO['ID'] is None: LUN_INFO['Name'] = self._paras_name(params) LUN_INFO['ID'] = VOLUME_SNAP_ID['vol'] LUN_INFO['Size'] = FAKE_VOLUME['size'] LUN_INFO['Lun Type'] = lun_type LUN_INFO['Owner Controller'] = 'A' LUN_INFO['Worker Controller'] = 'A' LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] FAKE_VOLUME['provider_location'] = LUN_INFO['ID'] else: CLONED_LUN_INFO['Name'] = self._paras_name(params) CLONED_LUN_INFO['ID'] = VOLUME_SNAP_ID['vol_copy'] CLONED_LUN_INFO['Size'] = FAKE_CLONED_VOLUME['size'] CLONED_LUN_INFO['Lun Type'] = lun_type CLONED_LUN_INFO['Owner Controller'] = 'A' CLONED_LUN_INFO['Worker Controller'] = 'A' CLONED_LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] CLONED_LUN_INFO['provider_location'] = CLONED_LUN_INFO['ID'] FAKE_CLONED_VOLUME['provider_location'] = CLONED_LUN_INFO['ID'] out = 'command operates successfully' return out def cli_showlun(self, params): if '-lun' not in params: if LUN_INFO['ID'] is None: out = 'command operates successfully, but no information.' elif CLONED_LUN_INFO['ID'] is None: msg = """/>showlun =========================================================================== LUN Information --------------------------------------------------------------------------- ID Status Controller Visible Capacity(MB) LUN Name Lun Type --------------------------------------------------------------------------- %s Normal %s %s %s THICK =========================================================================== """ out = msg % (LUN_INFO['ID'], LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name']) else: msg = """/>showlun =========================================================================== LUN Information --------------------------------------------------------------------------- ID Status Controller Visible Capacity(MB) LUN Name Lun Type --------------------------------------------------------------------------- %s Normal %s %s %s THICK %s Normal %s %s %s THICK =========================================================================== """ out = msg % (LUN_INFO['ID'], LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'], CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Owner Controller'], str(int(CLONED_LUN_INFO['Size']) * 1024), CLONED_LUN_INFO['Name']) elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values(): msg = """/>showlun ================================================ LUN Information ------------------------------------------------ ID | %s Name | %s LUN WWN | -- Visible Capacity | %s RAID GROUP ID | %s Owning Controller | %s Workong Controller | %s Lun Type | %s SnapShot ID | %s LunCopy ID | %s ================================================ """ out = msg % ( (LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], 
LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
                LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
                if (params[params.index('-lun') + 1] ==
                    VOLUME_SNAP_ID['vol'])
                else
                (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
                 CLONED_LUN_INFO['Visible Capacity'],
                 CLONED_LUN_INFO['RAID Group ID'],
                 CLONED_LUN_INFO['Owner Controller'],
                 CLONED_LUN_INFO['Worker Controller'],
                 CLONED_LUN_INFO['Lun Type'],
                 CLONED_LUN_INFO['SnapShot ID'],
                 CLONED_LUN_INFO['LunCopy ID']))
        else:
            out = 'ERROR: The object does not exist.'
        return out


class HuaweiTISCSIDriverTestCase(test.TestCase):
    def __init__(self, *args, **kwargs):
        super(HuaweiTISCSIDriverTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(HuaweiTISCSIDriverTestCase, self).setUp()
        self.tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
        self.addCleanup(os.remove, self.fake_conf_file)
        create_fake_conf_file(self.fake_conf_file)
        self.configuration = mox.MockObject(conf.Configuration)
        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
        self.configuration.append_config_values(mox.IgnoreArg())

        self.stubs.Set(time, 'sleep', Fake_sleep)
        self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
        self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
                       Fake_change_file_mode)
        self._init_driver()

    def _init_driver(self):
        Curr_test[0] = 'T'
        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
        self.driver.do_setup(None)

    def test_conf_invalid(self):
        # Test config file not found
        tmp_conf_file = '/xxx/cinder_huawei_conf.xml'
        tmp_configuration = mox.MockObject(conf.Configuration)
        tmp_configuration.cinder_huawei_conf_file = tmp_conf_file
        tmp_configuration.append_config_values(mox.IgnoreArg())
        self.assertRaises(IOError,
                          HuaweiVolumeDriver,
                          configuration=tmp_configuration)
        # Test Product and Protocol invalid
        tmp_dict = {'Storage/Product': 'T', 'Storage/Protocol': 'iSCSI'}
        for k, v in tmp_dict.items():
            modify_conf(self.fake_conf_file, k, 'xx')
            self.assertRaises(exception.InvalidInput,
                              HuaweiVolumeDriver,
                              configuration=self.configuration)
            modify_conf(self.fake_conf_file, k, v)
        # Test ctr ip, UserName and password unspecified
        tmp_dict = {'Storage/ControllerIP0': '10.10.10.1',
                    'Storage/ControllerIP1': '10.10.10.2',
                    'Storage/UserName': 'admin',
                    'Storage/UserPassword': '123456'}
        for k, v in tmp_dict.items():
            modify_conf(self.fake_conf_file, k, '')
            tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
            self.assertRaises(exception.InvalidInput,
                              tmp_driver.do_setup, None)
            modify_conf(self.fake_conf_file, k, v)
        # Test StoragePool unspecified
        modify_conf(self.fake_conf_file, 'LUN/StoragePool', '', attrib='Name')
        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
        self.assertRaises(exception.InvalidInput,
                          tmp_driver.do_setup, None)
        modify_conf(self.fake_conf_file, 'LUN/StoragePool', 'RAID_001',
                    attrib='Name')
        # Test LUN type invalid
        modify_conf(self.fake_conf_file, 'LUN/LUNType', 'thick')
        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
        tmp_driver.do_setup(None)
        self.assertRaises(exception.InvalidInput,
                          tmp_driver.create_volume, FAKE_VOLUME)
        modify_conf(self.fake_conf_file, 'LUN/LUNType', 'Thick')
        # Test OSType invalid
        modify_conf(self.fake_conf_file, 'Host', 'invalid_type',
                    attrib='OSType')
        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
        self.assertRaises(exception.InvalidInput,
                          tmp_driver.do_setup, None)
        modify_conf(self.fake_conf_file, 'Host', 'Linux', attrib='OSType')
        # Test TargetIP not found
modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', '') modify_conf(self.fake_conf_file, 'iSCSI/Initiator', '', attrib='Name') tmp_driver = HuaweiVolumeDriver(configuration=self.configuration) tmp_driver.do_setup(None) tmp_driver.create_volume(FAKE_VOLUME) self.assertRaises(exception.InvalidInput, tmp_driver.initialize_connection, FAKE_VOLUME, FAKE_CONNECTOR) tmp_driver.delete_volume(FAKE_VOLUME) modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', '192.168.100.1') modify_conf(self.fake_conf_file, 'iSCSI/Initiator', 'iqn.1993-08.debian:01:ec2bff7ac3a3', attrib='Name') def test_volume_type(self): ctxt = context.get_admin_context() extra_specs = {'drivers:LUNType': 'Thin'} type_ref = volume_types.create(ctxt, 'THIN', extra_specs) FAKE_VOLUME['volume_type_id'] = type_ref['id'] self.driver.create_volume(FAKE_VOLUME) self.assertEqual(LUN_INFO["ID"], VOLUME_SNAP_ID['vol']) self.assertEqual(LUN_INFO['Lun Type'], 'THIN') self.driver.delete_volume(FAKE_VOLUME) FAKE_VOLUME['volume_type_id'] = None # Test volume type invalid extra_specs = {'drivers:InvalidLUNType': 'Thin'} type_ref = volume_types.create(ctxt, 'Invalid_THIN', extra_specs) FAKE_VOLUME['volume_type_id'] = type_ref['id'] self.driver.create_volume(FAKE_VOLUME) self.assertEqual(LUN_INFO["ID"], VOLUME_SNAP_ID['vol']) self.assertNotEqual(LUN_INFO['Lun Type'], 'THIN') self.driver.delete_volume(FAKE_VOLUME) FAKE_VOLUME['volume_type_id'] = None def test_create_delete_volume(self): # Test create lun cli exception set_error_flg('createlun') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, FAKE_VOLUME) ret = self.driver.create_volume(FAKE_VOLUME) self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) self.assertEqual(ret['provider_location'], LUN_INFO['ID']) # Test delete lun cli exception set_error_flg('dellun') self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, FAKE_VOLUME) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(LUN_INFO['ID']) self.assertIsNone(FAKE_VOLUME['provider_location']) def test_create_delete_cloned_volume(self): # Test no source volume self.assertRaises(exception.VolumeNotFound, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) self.driver.create_volume(FAKE_VOLUME) # Test create luncopy failed self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) set_error_flg('createluncopy') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) self.driver.delete_volume(FAKE_CLONED_VOLUME) self.assertIsNone(CLONED_LUN_INFO['ID']) # Test start luncopy failed self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) set_error_flg('chgluncopystatus') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) self.assertIsNone(CLONED_LUN_INFO['ID']) self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) # Test luncopy status abnormal LUNCOPY_SETTING['Status'] = 'Disable' self.assertEqual(LUN_INFO['ID'], '0') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) self.assertIsNone(CLONED_LUN_INFO['ID']) self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) LUNCOPY_SETTING['Status'] = 'Normal' # Test delete luncopy failed set_error_flg('delluncopy') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) 
self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) self.driver.delete_volume(FAKE_CLONED_VOLUME) self.assertIsNone(CLONED_LUN_INFO['ID']) # need to clean up LUNCopy LUNCOPY_INFO['Name'] = None LUNCOPY_INFO['ID'] = None LUNCOPY_INFO['Type'] = None LUNCOPY_INFO['State'] = None LUNCOPY_INFO['Status'] = None # Test normal create and delete cloned volume self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) ret = self.driver.create_cloned_volume(FAKE_CLONED_VOLUME, FAKE_VOLUME) self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID']) self.driver.delete_volume(FAKE_CLONED_VOLUME) self.assertIsNone(CLONED_LUN_INFO['ID']) self.assertIsNone(FAKE_CLONED_VOLUME['provider_location']) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(LUN_INFO['ID']) def test_extend_volume(self): VOLUME_SIZE = 5 # Test no extended volume self.assertRaises(exception.VolumeNotFound, self.driver.extend_volume, FAKE_VOLUME, VOLUME_SIZE) self.driver.create_volume(FAKE_VOLUME) self.assertEqual(LUN_INFO['Size'], '2') # Test extend volume cli exception set_error_flg('addluntoextlun') self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, FAKE_VOLUME, VOLUME_SIZE) self.assertEqual(CLONED_LUN_INFO['Name'], None) self.driver.extend_volume(FAKE_VOLUME, VOLUME_SIZE) self.assertEqual(LUN_INFO['Size'], VOLUME_SIZE) self.driver.delete_volume(FAKE_VOLUME) self.assertEqual(LUN_INFO['Name'], None) def test_create_delete_snapshot(self): # Test no resource pool RESPOOL_A_SIM['Valid Size'] = '0' self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, FAKE_SNAPSHOT) RESPOOL_A_SIM['Valid Size'] = '5120' # Test no source volume self.assertRaises(exception.VolumeNotFound, self.driver.create_snapshot, FAKE_SNAPSHOT) # Test create snapshot cli exception self.driver.create_volume(FAKE_VOLUME) set_error_flg('createsnapshot') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, FAKE_SNAPSHOT) self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) # Test active snapshot failed set_error_flg('actvsnapshot') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, FAKE_SNAPSHOT) self.assertIsNone(SNAPSHOT_INFO['ID']) self.assertIsNone(SNAPSHOT_INFO['Status']) # Test disable snapshot failed set_error_flg('disablesnapshot') self.driver.create_snapshot(FAKE_SNAPSHOT) self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap']) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, FAKE_SNAPSHOT) self.assertEqual(SNAPSHOT_INFO['Status'], 'Active') # Test delsnapshot failed set_error_flg('delsnapshot') self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, FAKE_SNAPSHOT) self.assertEqual(SNAPSHOT_INFO['Status'], 'Disable') self.driver.delete_snapshot(FAKE_SNAPSHOT) # Test normal create and delete snapshot self.driver.create_volume(FAKE_VOLUME) ret = self.driver.create_snapshot(FAKE_SNAPSHOT) self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap']) self.assertEqual(SNAPSHOT_INFO['Status'], 'Active') self.assertEqual(ret['provider_location'], SNAPSHOT_INFO['ID']) self.driver.delete_snapshot(FAKE_SNAPSHOT) self.assertIsNone(SNAPSHOT_INFO['ID']) self.assertIsNone(SNAPSHOT_INFO['Status']) def test_create_delete_snapshot_volume(self): # Test no source snapshot self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) 
# Test normal create and delete snapshot volume
        self.driver.create_volume(FAKE_VOLUME)
        self.driver.create_snapshot(FAKE_SNAPSHOT)
        self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
        self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap'])
        ret = self.driver.create_volume_from_snapshot(FAKE_CLONED_VOLUME,
                                                      FAKE_SNAPSHOT)
        self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
        self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
        self.driver.delete_snapshot(FAKE_SNAPSHOT)
        self.driver.delete_volume(FAKE_VOLUME)
        self.driver.delete_volume(FAKE_CLONED_VOLUME)
        self.assertIsNone(LUN_INFO['ID'])
        self.assertIsNone(CLONED_LUN_INFO['ID'])
        self.assertIsNone(SNAPSHOT_INFO['ID'])

    def test_initialize_connection(self):
        # Test can not get iscsi iqn
        set_error_flg('showiscsitgtname')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test failed to get iSCSI port info
        set_error_flg('showiscsiip')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test create hostgroup failed
        set_error_flg('createhostgroup')
        MAP_INFO['Host Group ID'] = None
        MAP_INFO['Host Group Name'] = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test create host failed
        set_error_flg('addhost')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test add iSCSI initiator failed
        set_error_flg('addiscsiini')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test add hostport failed
        set_error_flg('addhostport')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test no volume
        FAKE_VOLUME['provider_location'] = '100'
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        FAKE_VOLUME['provider_location'] = None
        # Test map volume failed
        self.driver.create_volume(FAKE_VOLUME)
        set_error_flg('addhostmap')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test normal initialize connection
        self.assertEqual(FAKE_VOLUME['provider_location'],
                         VOLUME_SNAP_ID['vol'])
        self.assertEqual(LUN_INFO['Owner Controller'], 'A')
        ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
        iscsi_propers = ret['data']
        self.assertEqual(iscsi_propers['target_iqn'],
                         INITIATOR_SETTING['TargetIQN-form'])
        self.assertEqual(iscsi_propers['target_portal'],
                         INITIATOR_SETTING['Initiator TargetIP'] + ':3260')
        self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID'])
        self.assertEqual(MAP_INFO["INI Port Info"],
                         FAKE_CONNECTOR['initiator'])
        self.assertEqual(LUN_INFO['Owner Controller'], 'B')
        self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
        self.driver.delete_volume(FAKE_VOLUME)
        self.assertIsNone(LUN_INFO['ID'])

    def test_terminate_connection(self):
        # Test no host was found
        self.assertRaises(exception.HostNotFound,
                          self.driver.terminate_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        # Test no volume was found
        self.driver.create_volume(FAKE_VOLUME)
        self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
        FAKE_VOLUME['provider_location'] = None
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.terminate_connection,
                          FAKE_VOLUME, FAKE_CONNECTOR)
        FAKE_VOLUME['provider_location'] = 
LUN_INFO['ID'] # Test delete map failed set_error_flg('delhostmap') self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, FAKE_VOLUME, FAKE_CONNECTOR) # Delete hostport failed set_error_flg('delhostport') self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, FAKE_VOLUME, FAKE_CONNECTOR) # Test delete initiator failed set_error_flg('deliscsiini') self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, FAKE_VOLUME, FAKE_CONNECTOR) # Test delete host failed set_error_flg('delhost') self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, FAKE_VOLUME, FAKE_CONNECTOR) # Test normal terminate connection self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR) self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR) self.assertIsNone(MAP_INFO["DEV LUN ID"]) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(LUN_INFO['ID']) def test_get_volume_stats(self): stats = self.driver.get_volume_stats(True) free_capacity = float(POOL_SETTING['Free Capacity']) / 1024 self.assertEqual(stats['free_capacity_gb'], free_capacity) self.assertEqual(stats['storage_protocol'], 'iSCSI') class HuaweiTFCDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(HuaweiTFCDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(HuaweiTFCDriverTestCase, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp_dir) self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml' self.addCleanup(os.remove, self.fake_conf_file) create_fake_conf_file(self.fake_conf_file) modify_conf(self.fake_conf_file, 'Storage/Protocol', 'FC') self.configuration = mox.MockObject(conf.Configuration) self.configuration.cinder_huawei_conf_file = self.fake_conf_file self.configuration.append_config_values(mox.IgnoreArg()) self.stubs.Set(time, 'sleep', Fake_sleep) self.stubs.Set(utils, 'SSHPool', FakeSSHPool) self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode', Fake_change_file_mode) self._init_driver() def _init_driver(self): Curr_test[0] = 'T' self.driver = HuaweiVolumeDriver(configuration=self.configuration) self.driver.do_setup(None) def test_validate_connector_failed(self): invalid_connector = {'host': 'testhost'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.validate_connector, invalid_connector) def test_create_delete_volume(self): self.driver.create_volume(FAKE_VOLUME) self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(LUN_INFO['ID']) def test_create_delete_snapshot(self): self.driver.create_volume(FAKE_VOLUME) self.driver.create_snapshot(FAKE_SNAPSHOT) self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap']) self.driver.delete_snapshot(FAKE_SNAPSHOT) self.assertIsNone(SNAPSHOT_INFO['ID']) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(LUN_INFO['ID']) def test_create_cloned_volume(self): self.driver.create_volume(FAKE_VOLUME) ret = self.driver.create_cloned_volume(FAKE_CLONED_VOLUME, FAKE_VOLUME) self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID']) self.driver.delete_volume(FAKE_CLONED_VOLUME) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(CLONED_LUN_INFO['ID']) self.assertIsNone(LUN_INFO['ID']) def test_create_snapshot_volume(self): self.driver.create_volume(FAKE_VOLUME) 
self.driver.create_snapshot(FAKE_SNAPSHOT)
        ret = self.driver.create_volume_from_snapshot(FAKE_CLONED_VOLUME,
                                                      FAKE_SNAPSHOT)
        self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
        self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
        self.driver.delete_volume(FAKE_CLONED_VOLUME)
        self.driver.delete_volume(FAKE_VOLUME)
        self.assertIsNone(CLONED_LUN_INFO['ID'])
        self.assertIsNone(LUN_INFO['ID'])

    def test_initialize_terminate_connection(self):
        self.driver.create_volume(FAKE_VOLUME)
        ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
        fc_properties = ret['data']
        self.assertEqual(fc_properties['target_wwn'],
                         INITIATOR_SETTING['WWN'])
        self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID'])
        self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
        self.assertIsNone(MAP_INFO["DEV LUN ID"])
        self.assertIsNone(MAP_INFO["Host LUN ID"])
        self.driver.delete_volume(FAKE_VOLUME)
        self.assertIsNone(LUN_INFO['ID'])

    def _test_get_volume_stats(self):
        stats = self.driver.get_volume_stats(True)
        fakecapacity = float(POOL_SETTING['Free Capacity']) / 1024
        self.assertEqual(stats['free_capacity_gb'], fakecapacity)
        self.assertEqual(stats['storage_protocol'], 'FC')


class HuaweiDorado5100FCDriverTestCase(HuaweiTFCDriverTestCase):
    def __init__(self, *args, **kwargs):
        super(HuaweiDorado5100FCDriverTestCase, self).__init__(*args,
                                                               **kwargs)

    def setUp(self):
        super(HuaweiDorado5100FCDriverTestCase, self).setUp()

    def _init_driver(self):
        Curr_test[0] = 'Dorado5100'
        modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
        self.driver.do_setup(None)

    def test_create_cloned_volume(self):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          FAKE_CLONED_VOLUME, FAKE_VOLUME)

    def test_create_snapshot_volume(self):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)


class HuaweiDorado2100G2FCDriverTestCase(HuaweiTFCDriverTestCase):
    def __init__(self, *args, **kwargs):
        super(HuaweiDorado2100G2FCDriverTestCase, self).__init__(*args,
                                                                 **kwargs)

    def setUp(self):
        super(HuaweiDorado2100G2FCDriverTestCase, self).setUp()

    def _init_driver(self):
        Curr_test[0] = 'Dorado2100G2'
        modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
        self.driver.do_setup(None)

    def test_create_cloned_volume(self):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          FAKE_CLONED_VOLUME, FAKE_VOLUME)

    def test_create_delete_snapshot(self):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot, FAKE_SNAPSHOT)

    def test_create_snapshot_volume(self):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)

    def test_extend_volume(self):
        NEWSIZE = 5
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          FAKE_VOLUME, NEWSIZE)


class HuaweiDorado5100ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase):
    def __init__(self, *args, **kwargs):
        super(HuaweiDorado5100ISCSIDriverTestCase, self).__init__(*args,
                                                                  **kwargs)

    def setUp(self):
        super(HuaweiDorado5100ISCSIDriverTestCase, self).setUp()

    def _init_driver(self):
        Curr_test[0] = 'Dorado5100'
        modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
        self.driver.do_setup(None)

    def 
test_create_delete_cloned_volume(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) def test_create_delete_snapshot_volume(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) def test_volume_type(self): pass class HuaweiDorado2100G2ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase): def __init__(self, *args, **kwargs): super(HuaweiDorado2100G2ISCSIDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(HuaweiDorado2100G2ISCSIDriverTestCase, self).setUp() def _init_driver(self): Curr_test[0] = 'Dorado2100G2' modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado') self.driver = HuaweiVolumeDriver(configuration=self.configuration) self.driver.do_setup(None) def test_conf_invalid(self): pass def test_create_delete_cloned_volume(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, FAKE_CLONED_VOLUME, FAKE_VOLUME) def test_create_delete_snapshot(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, FAKE_SNAPSHOT) def test_create_delete_snapshot_volume(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) def test_initialize_connection(self): self.driver.create_volume(FAKE_VOLUME) ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR) iscsi_propers = ret['data'] self.assertEqual(iscsi_propers['target_iqn'], INITIATOR_SETTING['TargetIQN-form']) self.assertEqual(iscsi_propers['target_portal'], INITIATOR_SETTING['Initiator TargetIP'] + ':3260') self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID']) self.assertEqual(MAP_INFO["INI Port Info"], FAKE_CONNECTOR['initiator']) self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR) self.driver.delete_volume(FAKE_VOLUME) self.assertIsNone(LUN_INFO['ID']) def test_extend_volume(self): NEWSIZE = 5 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, FAKE_VOLUME, NEWSIZE) class SSHMethodTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(SSHMethodTestCase, self).__init__(*args, **kwargs) def setUp(self): super(SSHMethodTestCase, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp_dir) self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml' self.addCleanup(os.remove, self.fake_conf_file) create_fake_conf_file(self.fake_conf_file) self.configuration = mox.MockObject(conf.Configuration) self.configuration.cinder_huawei_conf_file = self.fake_conf_file self.configuration.append_config_values(mox.IgnoreArg()) self.stubs.Set(time, 'sleep', Fake_sleep) self.stubs.Set(utils, 'SSHPool', FakeSSHPool) self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode', Fake_change_file_mode) Curr_test[0] = 'T' self.driver = HuaweiVolumeDriver(configuration=self.configuration) self.driver.do_setup(None) def test_reach_max_connection_limit(self): self.stubs.Set(FakeChannel, 'recv', self._fake_recv1) self.assertRaises(exception.CinderException, self.driver.create_volume, FAKE_VOLUME) def test_socket_timeout(self): self.stubs.Set(FakeChannel, 'recv', self._fake_recv2) self.assertRaises(socket.timeout, self.driver.create_volume, FAKE_VOLUME) def _fake_recv1(self, nbytes): return "No response message" def _fake_recv2(self, nBytes): raise socket.timeout() class HuaweiUtilsTestCase(test.TestCase): def __init__(self, *args, **kwargs): 
super(HuaweiUtilsTestCase, self).__init__(*args, **kwargs) def setUp(self): super(HuaweiUtilsTestCase, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp_dir) self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml' self.addCleanup(os.remove, self.fake_conf_file) create_fake_conf_file(self.fake_conf_file) def test_parse_xml_file_ioerror(self): tmp_fonf_file = '/xxx/cinder_huawei_conf.xml' self.assertRaises(IOError, huawei_utils.parse_xml_file, tmp_fonf_file) def test_is_xml_item_exist(self): root = huawei_utils.parse_xml_file(self.fake_conf_file) res = huawei_utils.is_xml_item_exist(root, 'Storage/UserName') self.assertTrue(res) res = huawei_utils.is_xml_item_exist(root, 'xxx') self.assertFalse(res) res = huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'Name') self.assertTrue(res) res = huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'xxx') self.assertFalse(res) def test_is_xml_item_valid(self): root = huawei_utils.parse_xml_file(self.fake_conf_file) res = huawei_utils.is_xml_item_valid(root, 'LUN/LUNType', ['Thin', 'Thick']) self.assertTrue(res) res = huawei_utils.is_xml_item_valid(root, 'LUN/LUNType', ['test']) self.assertFalse(res) res = huawei_utils.is_xml_item_valid(root, 'Host', ['Linux', 'Windows'], 'OSType') self.assertTrue(res) res = huawei_utils.is_xml_item_valid(root, 'Host', ['test'], 'OSType') self.assertFalse(res) def test_get_conf_host_os_type(self): # Default os is Linux res = huawei_utils.get_conf_host_os_type('10.10.10.1', self.fake_conf_file) self.assertEqual(res, '0') modify_conf(self.fake_conf_file, 'Host', 'Windows', 'OSType') res = huawei_utils.get_conf_host_os_type(FAKE_CONNECTOR['ip'], self.fake_conf_file) self.assertEqual(res, '1')
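

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the
# test_get_conf_host_os_type assertions assume a helper that maps the
# <Host OSType="..."> entry of the driver's XML config file to the array's
# numeric OS code ('0' for Linux by default, '1' for Windows). A minimal
# implementation consistent with those assertions might look like the
# following; the real helper lives in the huawei_utils module and may differ
# in its IP matching and error handling.
import xml.etree.ElementTree as ElementTree

SKETCH_OS_TYPE_CODES = {'Linux': '0', 'Windows': '1'}


def sketch_get_conf_host_os_type(host_ip, conf_file):
    """Return the numeric OS code configured for host_ip ('0' = Linux)."""
    root = ElementTree.parse(conf_file).getroot()
    os_type = 'Linux'
    for host in root.findall('Host'):
        # Match the connector IP against the element's HostIP attribute.
        ips = (host.attrib.get('HostIP') or '').split(',')
        if host_ip in ips:
            os_type = host.attrib.get('OSType', 'Linux').strip()
            break
    return SKETCH_OS_TYPE_CODES.get(os_type, '0')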
########################################################################## # # Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved. # Copyright (c) 2011, John Haddon. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import unittest import os import os.path import threading import math import shutil import imath import IECore import IECoreScene import IECoreImage import IECoreGL IECoreGL.init( False ) class TestRenderer( unittest.TestCase ) : def testOptions( self ) : os.environ["IECOREGL_TEXTURE_PATHS"] = "textureDefault" os.environ["IECOREGL_SHADER_PATHS"] = "shaderDefault" r = IECoreGL.Renderer() self.assertEqual( r.typeName(), "IECoreGL::Renderer" ) self.assertEqual( r.getOption( "searchPath:texture" ), IECore.StringData( "textureDefault" ) ) self.assertEqual( r.getOption( "gl:searchPath:texture" ), IECore.StringData( "textureDefault" ) ) r.setOption( "searchPath:texture", IECore.StringData( "a" ) ) self.assertEqual( r.getOption( "searchPath:texture" ), IECore.StringData( "a" ) ) self.assertEqual( r.getOption( "gl:searchPath:texture" ), IECore.StringData( "a" ) ) r.setOption( "gl:searchPath:texture", IECore.StringData( "b" ) ) self.assertEqual( r.getOption( "searchPath:texture" ), IECore.StringData( "b" ) ) self.assertEqual( r.getOption( "gl:searchPath:texture" ), IECore.StringData( "b" ) ) self.assertEqual( r.getOption( "searchPath:shader" ), IECore.StringData( "shaderDefault" ) ) self.assertEqual( r.getOption( "gl:searchPath:shader" ), IECore.StringData( "shaderDefault" ) ) r.setOption( "searchPath:shader", IECore.StringData( "s" ) ) self.assertEqual( r.getOption( "searchPath:shader" ), IECore.StringData( "s" ) ) self.assertEqual( r.getOption( "gl:searchPath:shader" ), IECore.StringData( "s" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( "t" ) ) self.assertEqual( r.getOption( "searchPath:shader" ), IECore.StringData( "t" ) ) self.assertEqual( r.getOption( "gl:searchPath:shader" ), IECore.StringData( "t" ) ) self.assertEqual( r.getOption( 
"shutter" ), IECore.V2fData( imath.V2f( 0 ) ) ) r.setOption( "shutter", IECore.V2fData( imath.V2f( 1, 2 ) ) ) self.assertEqual( r.getOption( "shutter" ), IECore.V2fData( imath.V2f( 1, 2 ) ) ) self.assertEqual( r.getOption( "gl:drawCoordinateSystems" ), IECore.BoolData( False ) ) r.setOption( "gl:drawCoordinateSystems", IECore.BoolData( True ) ) self.assertEqual( r.getOption( "gl:drawCoordinateSystems" ), IECore.BoolData( True ) ) def testAttributes( self ) : deferred = IECoreGL.Renderer() deferred.setOption( "gl:mode", IECore.StringData( "deferred" ) ) immediate = IECoreGL.Renderer() immediate.setOption( "gl:mode", IECore.StringData( "immediate" ) ) for r in [ deferred, immediate ] : r.worldBegin() self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( imath.Color3f( 1 ) ) ) self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( imath.Color3f( 1 ) ) ) self.assertEqual( r.getAttribute( "gl:color" ), IECore.Color4fData( imath.Color4f( 1 ) ) ) self.assertEqual( r.getAttribute( "gl:blend:color" ), IECore.Color4fData( imath.Color4f( 1 ) ) ) self.assertEqual( r.getAttribute( "gl:blend:srcFactor" ), IECore.StringData( "srcAlpha" ) ) self.assertEqual( r.getAttribute( "gl:blend:dstFactor" ), IECore.StringData( "oneMinusSrcAlpha" ) ) self.assertEqual( r.getAttribute( "gl:blend:equation" ), IECore.StringData( "add" ) ) self.assertEqual( r.getAttribute( "gl:shade:transparent" ), IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:primitive:sortForTransparency" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "name" ), IECore.StringData( "unnamed" ) ) self.assertEqual( r.getAttribute( "doubleSided" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:smoothing:points" ), IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:smoothing:lines" ), IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:smoothing:polygons" ), IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:procedural:reentrant" ), IECore.BoolData( True ) ) if IECore.withFreeType() : self.assertEqual( r.getAttribute( "gl:textPrimitive:type" ), IECore.StringData( "mesh" ) ) self.assertEqual( r.getAttribute( "gl:depthTest" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:depthMask" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:alphaTest" ), IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:alphaTest:mode" ), IECore.StringData( "always" ) ) self.assertEqual( r.getAttribute( "gl:alphaTest:value" ), IECore.FloatData( 0.0 ) ) self.assertEqual( r.getAttribute( "gl:visibility:camera" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:automaticInstancing" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "automaticInstancing" ), IECore.BoolData( True ) ) r.setAttribute( "color", IECore.Color3fData( imath.Color3f( 0, 1, 2 ) ) ) self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( imath.Color3f( 0, 1, 2 ) ) ) # opacity is an odd one - it's set as a color but as it's averaged internally # the result you get should be a greyscale value. 
r.setAttribute( "opacity", IECore.Color3fData( imath.Color3f( 3, 1, 2 ) ) ) self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( imath.Color3f( 2 ) ) ) self.assertEqual( r.getAttribute( "gl:color" ), IECore.Color4fData( imath.Color4f( 0, 1, 2, 2 ) ) ) r.setAttribute( "gl:color", IECore.Color4fData( imath.Color4f( 1, 2, 3, 4 ) ) ) self.assertEqual( r.getAttribute( "gl:color" ), IECore.Color4fData( imath.Color4f( 1, 2, 3, 4 ) ) ) r.setAttribute( "gl:blend:color", IECore.Color4fData( imath.Color4f( 0, 1, 0, 1 ) ) ) self.assertEqual( r.getAttribute( "gl:blend:color" ), IECore.Color4fData( imath.Color4f( 0, 1, 0, 1 ) ) ) r.attributeBegin() r.setAttribute( "color", IECore.Color3fData( imath.Color3f( 0 ) ) ) self.assertEqual( r.getAttribute( "gl:color" ), IECore.Color4fData( imath.Color4f( 0, 0, 0, 4 ) ) ) r.attributeEnd() self.assertEqual( r.getAttribute( "gl:color" ), IECore.Color4fData( imath.Color4f( 1, 2, 3, 4 ) ) ) factors = [ "zero", "one", "srcColor", "oneMinusSrcColor", "dstColor", "oneMinusDstColor", "srcAlpha", "oneMinusSrcAlpha", "dstAlpha", "oneMinusDstAlpha", "dstAlpha", "oneMinusDstAlpha", "constantColor", "oneMinusConstantColor", "constantAlpha", "oneMinusConstantAlpha" ] for f in factors : last = r.getAttribute( "gl:blend:dstFactor" ) r.setAttribute( "gl:blend:srcFactor", IECore.StringData( f ) ) self.assertEqual( r.getAttribute( "gl:blend:srcFactor" ), IECore.StringData( f ) ) self.assertEqual( r.getAttribute( "gl:blend:dstFactor" ), last ) last = r.getAttribute( "gl:blend:srcFactor" ) r.setAttribute( "gl:blend:dstFactor", IECore.StringData( f ) ) self.assertEqual( r.getAttribute( "gl:blend:srcFactor" ), IECore.StringData( f ) ) self.assertEqual( r.getAttribute( "gl:blend:dstFactor" ), last ) for e in ["add", "subtract", "reverseSubtract", "min", "max"] : r.setAttribute( "gl:blend:equation", IECore.StringData( e ) ) self.assertEqual( r.getAttribute( "gl:blend:equation" ), IECore.StringData( e ) ) r.setAttribute( "name", IECore.StringData( "sphere" ) ) self.assertEqual( r.getAttribute( "name" ), IECore.StringData( "sphere" ) ) r.setAttribute( "doubleSided", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "doubleSided" ), IECore.BoolData( False ) ) r.setAttribute( "gl:smoothing:points", IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:smoothing:points" ), IECore.BoolData( True ) ) r.setAttribute( "gl:smoothing:lines", IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:smoothing:lines" ), IECore.BoolData( True ) ) r.setAttribute( "gl:smoothing:polygons", IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:smoothing:polygons" ), IECore.BoolData( True ) ) r.setAttribute( "gl:procedural:reentrant", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:procedural:reentrant" ), IECore.BoolData( False ) ) if IECore.withFreeType() : r.setAttribute( "gl:textPrimitive:type", IECore.StringData( "sprite" ) ) self.assertEqual( r.getAttribute( "gl:textPrimitive:type" ), IECore.StringData( "sprite" ) ) r.setAttribute( "gl:depthTest", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:depthTest" ), IECore.BoolData( False ) ) r.setAttribute( "gl:depthMask", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:depthMask" ), IECore.BoolData( False ) ) r.setAttribute( "gl:alphaTest", IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:alphaTest" ), IECore.BoolData( True ) ) alphaTestModes = [ "never", "less", "equal", "lequal", "greater", "notequal", "gequal", "always" ] value = 0.1 for 
m in alphaTestModes : last = r.getAttribute( "gl:alphaTest:value" ) r.setAttribute( "gl:alphaTest:mode", IECore.StringData( m ) ) self.assertEqual( r.getAttribute( "gl:alphaTest:mode" ), IECore.StringData( m ) ) self.assertEqual( r.getAttribute( "gl:alphaTest:value" ), last ) last = r.getAttribute( "gl:alphaTest:mode" ) r.setAttribute( "gl:alphaTest:value", IECore.FloatData( value ) ) self.assertEqual( r.getAttribute( "gl:alphaTest:value" ), IECore.FloatData( value ) ) self.assertEqual( r.getAttribute( "gl:alphaTest:mode" ), last ) value += 0.05 r.setAttribute( "gl:visibility:camera", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:visibility:camera" ), IECore.BoolData( False ) ) r.setAttribute( "gl:automaticInstancing", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "gl:automaticInstancing" ), IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "automaticInstancing" ), IECore.BoolData( False ) ) r.setAttribute( "automaticInstancing", IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "automaticInstancing" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "gl:automaticInstancing" ), IECore.BoolData( True ) ) r.worldEnd() def testOtherRendererAttributes( self ) : """Attributes destined for other renderers should be silently ignored.""" deferred = IECoreGL.Renderer() deferred.setOption( "gl:mode", IECore.StringData( "deferred" ) ) immediate = IECoreGL.Renderer() immediate.setOption( "gl:mode", IECore.StringData( "immediate" ) ) with IECore.CapturingMessageHandler() as handler : for r in [ deferred, immediate ] : r.worldBegin() r.setAttribute( "ri:visibility:diffuse", IECore.IntData( 0 ) ) r.worldEnd() self.assertEqual( len( handler.messages ), 0 ) def testStackBug( self ) : # This should produce a yellow sphere in between two red spheres. It does in the DeferredRenderer but # currently fails in the ImmediateRenderer. 
r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "immediate" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.display( os.path.dirname( __file__ ) + "/output/testStackBug.tif", "tiff", "rgba", {} ) r.worldBegin() r.shader( "surface", "rgbColor", { "red" : IECore.FloatData( 1 ), "green" : IECore.FloatData( 0 ), "blue" : IECore.FloatData( 0 ) } ) r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -5 ) ) ) r.attributeBegin() r.shader( "surface", "rgbColor", { "red" : IECore.FloatData( 1 ), "green" : IECore.FloatData( 1 ), "blue" : IECore.FloatData( 0 ) } ) r.sphere( 1, -1, 1, 360, {} ) r.attributeEnd() r.concatTransform( imath.M44f().translate( imath.V3f( -1, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, {} ) r.concatTransform( imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, {} ) r.worldEnd() i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/testStackBug.tif" ).read() dimensions = i.dataWindow.size() + imath.V2i( 1 ) index = dimensions.x * int(dimensions.y * 0.5) + int(dimensions.x * 0.5) self.assertEqual( i["R"][index], 1 ) self.assertEqual( i["G"][index], 1 ) self.assertEqual( i["B"][index], 0 ) index = dimensions.x * int(dimensions.y * 0.5) self.assertEqual( i["R"][index], 1 ) self.assertEqual( i["G"][index], 0 ) self.assertEqual( i["B"][index], 0 ) index = dimensions.x * int(dimensions.y * 0.5) + int(dimensions.x * 1) - 1 self.assertEqual( i["R"][index], 1 ) self.assertEqual( i["G"][index], 0 ) self.assertEqual( i["B"][index], 0 ) def testPrimVars( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "immediate" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.display( os.path.dirname( __file__ ) + "/output/testPrimVars.tif", "tiff", "rgba", {} ) r.worldBegin() r.shader( "surface", "rgbColor", {} ) r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -5 ) ) ) r.attributeBegin() # should make red, green and blue spheres r.sphere( 1, -1, 1, 360, { "red" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) ), "green" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0 ) ), "blue" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0 ) ), } ) r.attributeEnd() r.concatTransform( imath.M44f().translate( imath.V3f( -1, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, { "red" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0 ) ), "green" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) ), "blue" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0 ) ), } ) r.concatTransform( imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, { "red" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0 ) ), "green" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0 ) ), "blue" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) ), } ) r.worldEnd() i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/testPrimVars.tif" ).read() dimensions = i.dataWindow.size() + imath.V2i( 1 ) index = dimensions.x * 
int(dimensions.y * 0.5) self.assertEqual( i["R"][index], 0 ) self.assertEqual( i["G"][index], 1 ) self.assertEqual( i["B"][index], 0 ) index = dimensions.x * int(dimensions.y * 0.5) + int(dimensions.x * 0.5) self.assertEqual( i["R"][index], 1 ) self.assertEqual( i["G"][index], 0 ) self.assertEqual( i["B"][index], 0 ) index = dimensions.x * int(dimensions.y * 0.5) + int(dimensions.x * 1) - 1 self.assertEqual( i["R"][index], 0 ) self.assertEqual( i["G"][index], 0 ) self.assertEqual( i["B"][index], 1 ) ## \todo Make this assert something def testShader( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( os.path.dirname( __file__ ) + "/shaders/include" ) ) r.worldBegin() r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -5 ) ) ) r.sphere( 1, -1, 1, 360, {} ) r.worldEnd() s = r.scene() s.render( IECoreGL.State( True ) ) def __countChildrenRecursive( self, g ) : if not isinstance( g, IECoreGL.Group ): return 1 count = 0 for c in g.children(): count += self.__countChildrenRecursive( c ) return count def testEdits( self ): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.worldBegin() r.worldEnd() with IECore.CapturingMessageHandler() as handler : r.attributeBegin() r.setAttribute( "gl:color", IECore.Color4fData( imath.Color4f( 1, 2, 3, 4 ) ) ) r.attributeEnd() self.assertEqual( len( handler.messages ), 3 ) with IECore.CapturingMessageHandler() as handler : r.command( "editBegin", {} ) r.attributeBegin() r.setAttribute( "gl:color", IECore.Color4fData( imath.Color4f( 1, 2, 3, 4 ) ) ) r.attributeEnd() r.command( "editEnd", {} ) self.assertEqual( len( handler.messages ), 0 ) def testRemoveObject( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) with IECoreScene.WorldBlock( r ) : r.setAttribute( "name", "sphereOne" ) r.sphere( 1, -1, 1, 360, {} ) r.setAttribute( "name", "sphereTwo" ) r.sphere( 1, -1, 1, 360, {} ) with IECoreScene.AttributeBlock( r ) : r.sphere( 1, -1, 1, 360, {} ) r.setAttribute( "name", "sphereOne" ) r.sphere( 1, -1, 1, 360, {} ) r.sphere( 1, -1, 1, 360, {} ) r.sphere( 1, -1, 1, 360, {} ) s = r.scene() self.assertEqual( len( s.root().children() ), 3 ) # check that trying to remove objects when not in an editBegin/editEnd block # fails and prints a message errorCatcher = IECore.CapturingMessageHandler() with errorCatcher : commandResult = r.command( "removeObject", { "name" : IECore.StringData( "sphereOne" ) } ) self.assertEqual( commandResult, None ) self.assertEqual( len( errorCatcher.messages ), 1 ) # check we can remove one object without affecting the other r.command( "editBegin", {} ) commandResult = r.command( "removeObject", { "name" : IECore.StringData( "sphereOne" ) } ) r.command( "editEnd", {} ) self.assertEqual( commandResult, IECore.BoolData( True ) ) self.assertEqual( len( s.root().children() ), 2 ) self.assertEqual( self.__countChildrenRecursive( s.root() ), 2 ) # now we test that both the sphere and the following attribute block ( instantiated as a Group ) are removed r.command( "editBegin", {} ) commandResult = r.command( "removeObject", { "name" : IECore.StringData( "sphereTwo" ) } ) r.command( "editEnd", {} ) self.assertEqual( commandResult, IECore.BoolData( True ) )
self.assertEqual( len( s.root().children() ), 0 ) def testEditQuery( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) with IECoreScene.WorldBlock( r ) : self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( False ) ) self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( False ) ) r.command( "editBegin", {} ) self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( True ) ) r.command( "editEnd", {} ) self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( False ) ) def testRemoveObjectDuringProcedural( self ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) with IECoreScene.WorldBlock( r ) : r.setAttribute( "name", "sphereOne" ) r.sphere( 1, -1, 1, 360, {} ) r.setAttribute( "name", "sphereTwo" ) r.sphere( 1, -1, 1, 360, {} ) s = r.scene() self.assertEqual( len( s.root().children() ), 2 ) class RemovalProcedural( IECoreScene.Renderer.Procedural ): def __init__( proc ): IECoreScene.Renderer.Procedural.__init__( proc ) def bound( proc ) : return imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) def render( proc, renderer ): commandResult = renderer.command( "removeObject", { "name" : IECore.StringData( "sphereOne" ) } ) self.assertEqual( commandResult, IECore.BoolData( True ) ) def hash( self ): h = IECore.MurmurHash() return h r.command( "editBegin", {} ) r.procedural( RemovalProcedural() ) r.command( "editEnd", {} ) self.assertEqual( len( s.root().children() ), 1 ) self.assertEqual( self.__countChildrenRecursive( r.scene().root() ), 1 ) def testRemoveObjectWithResourcesDuringProcedural( self ) : r = IECoreGL.Renderer() r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) with IECoreScene.WorldBlock( r ) : with IECoreScene.AttributeBlock( r ) : r.setAttribute( "name", "sphereOne" ) r.shader( "surface", "image", { "texture" : IECore.SplinefColor3fData( IECore.SplinefColor3f( IECore.CubicBasisf.catmullRom(), ( ( 0, imath.Color3f( 1 ) ), ( 0, imath.Color3f( 1 ) ), ( 1, imath.Color3f( 0 ) ), ( 1, imath.Color3f( 0 ) ), ), ), ), } ) r.sphere( 1, -1, 1, 360, {} ) s = r.scene() self.assertEqual( len( s.root().children()[0].children() ), 1 ) s.render() class RemovalProcedural( IECoreScene.Renderer.Procedural ): def __init__( proc, level=0 ) : IECoreScene.Renderer.Procedural.__init__( proc ) def bound( proc ) : return imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) def render( proc, renderer ): commandResult = renderer.command( "removeObject", { "name" : IECore.StringData( "sphereOne" ) } ) self.assertEqual( commandResult, IECore.BoolData( True ) ) def hash( self ): h = IECore.MurmurHash() return h r.command( "editBegin", {} ) # typically you wouldn't call a renderer method on a separate thread like this. we're just # doing it here to force the procedural onto a different thread. if left to its own devices # the renderer will run procedurals on different threads, but it equally well might call # them on the main thread. we force the procedural onto a separate thread so we can reliably # exercise a problem we're trying to address. t = threading.Thread( target=IECore.curry( r.procedural, RemovalProcedural() ) ) t.start() t.join() # if an edit session removes objects which use gl resources (shaders, textures etc), # then it's essential that the editEnd call occurs on the thread with the correct gl context. # this is so the gl resources can be deleted in the correct context. 
r.command( "editEnd", {} ) self.assertEqual( len( s.root().children() ), 0 ) def testParallelRenders( self ): allScenes = [] def threadedRendering(): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( os.path.dirname( __file__ ) + "/shaders/include" ) ) r.worldBegin() r.shader( "surface", "failWithoutPreprocessing", {} ) r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -5 ) ) ) r.worldEnd() allScenes.append( r.scene() ) for i in xrange( 0, 100 ): newThread = threading.Thread(target=threadedRendering) newThread.start() while len(allScenes) < 100 : pass for s in allScenes : s.render( IECoreGL.State( True ) ) class RecursiveProcedural( IECoreScene.Renderer.Procedural ): """Creates a pyramid of spheres""" maxLevel = 5 threadsUsed = set() def __init__( self, level = 0 ): IECoreScene.Renderer.Procedural.__init__( self ) self.__level = level if level == 0 : self.threadsUsed.clear() def bound( self ) : return imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) def render( self, renderer ): # registers this thread id self.threadsUsed.add( threading.currentThread().getName() ) renderer.attributeBegin() renderer.setAttribute( "color", IECore.Color3fData( imath.Color3f( float(self.__level)/self.maxLevel, 0, 1 - float(self.__level)/self.maxLevel ) ) ) renderer.transformBegin() renderer.concatTransform( imath.M44f().translate(imath.V3f( 0, 0.5, 0 )) ) renderer.concatTransform( imath.M44f().scale( imath.V3f(0.5) ) ) renderer.sphere( 1, -1, 1, 360, {} ) renderer.transformEnd() # end of recursion if self.__level < self.maxLevel : renderer.transformBegin() renderer.concatTransform( imath.M44f().translate(imath.V3f( 0, -0.5, 0 )) ) for i in xrange( 0, 2 ) : renderer.transformBegin() renderer.concatTransform( imath.M44f().translate(imath.V3f( (i - 0.5) , 0, 0)) ) renderer.concatTransform( imath.M44f().scale( imath.V3f(0.5) ) ) proc = TestRenderer.RecursiveProcedural( self.__level + 1 ) renderer.procedural( proc ) renderer.transformEnd() renderer.transformEnd() renderer.attributeEnd() def hash( self ): h = IECore.MurmurHash() return h def testMultithreadedProcedural( self ): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( os.path.dirname( __file__ ) + "/shaders/include" ) ) r.worldBegin() p = self.RecursiveProcedural() r.procedural( p ) r.worldEnd() self.assert_( len(self.RecursiveProcedural.threadsUsed) > 1 ) def testParallelMultithreadedProcedurals( self ): renders = [] def newRender(): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( os.path.dirname( __file__ ) + "/shaders/include" ) ) r.worldBegin() p = self.RecursiveProcedural() r.procedural( p ) r.worldEnd() renders.append( 0 ) threads = [] for i in xrange( 0,10 ): newThread = threading.Thread(target=newRender) newThread.start() threads.append( newThread ) for t in threads : t.join() def testDisableProceduralThreading( self ): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.setOption( "gl:searchPath:shader", IECore.StringData( 
os.path.dirname( __file__ ) + "/shaders" ) ) r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( os.path.dirname( __file__ ) + "/shaders/include" ) ) with IECoreScene.WorldBlock( r ) : r.setAttribute( "gl:procedural:reentrant", IECore.BoolData( False ) ) p = self.RecursiveProcedural() r.procedural( p ) self.assertEqual( len( self.RecursiveProcedural.threadsUsed ), 1 ) def testObjectSpaceCulling( self ): p = self.RecursiveProcedural() def renderWithCulling( box ): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.worldBegin() r.sphere( 1.5, 0, 1, 360, {} ) r.procedural( p ) r.attributeBegin() if True: r.setAttribute( "gl:cullingSpace", IECore.StringData( "object" ) ) r.setAttribute( "gl:cullingBox", IECore.Box3fData( box ) ) # everything in this block is culled r.sphere( 1.5, 0, 1, 360, {} ) r.procedural( p ) r.attributeEnd() r.worldEnd() return self.__countChildrenRecursive( r.scene().root() ) noCullingCounter = renderWithCulling( imath.Box3f() ) # verify that only half of the things are rendered when the given culling box is defined. self.assertEqual( renderWithCulling( imath.Box3f( imath.V3f(2,-1,-1), imath.V3f(3,1,1) ) ) * 2, noCullingCounter ) def testWorldSpaceCulling( self ): p = self.RecursiveProcedural() box = imath.Box3f( imath.V3f(0.001,-1,-1), imath.V3f(1,1,1) ) r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.worldBegin() r.setAttribute( "gl:cullingSpace", IECore.StringData( "world" ) ) r.setAttribute( "gl:cullingBox", IECore.Box3fData( box ) ) r.sphere( 1, 0, 1, 360, {} ) # half-inside : 1 element r.procedural( p ) # half-inside: 32 elements (full procedural renders 63 elements) r.transformBegin() if True: r.concatTransform( imath.M44f().translate( imath.V3f(-2, 0, 0) ) ) # everything in this block is culled r.sphere( 1, 0, 1, 360, {} ) r.procedural( p ) r.transformEnd() r.worldEnd() self.assertEqual( self.__countChildrenRecursive( r.scene().root() ), 33 ) def testTransformsInImmediateRenderer( self ): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "immediate" ) ) r.transformBegin() r.concatTransform( imath.M44f().rotate( imath.V3f( 1, 1, 1 ) ) ) r.camera( "main", { "resolution" : IECore.V2iData( imath.V2i( 512 ) ), "projection" : IECore.StringData( "perspective" ) } ) r.transformEnd() r.worldBegin() # confirm that the camera transformation is not affecting the world space matrix r.concatTransform( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ) self.assert_( r.getTransform().equalWithAbsError( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ), 1e-4 ) ) # confirm that setting the world space transform does not affect the camera matrix (that was already set in openGL ) r.setTransform( imath.M44f().translate( imath.V3f( 0, 1, 0 ) ) ) self.assert_( r.getTransform().equalWithAbsError( imath.M44f().translate( imath.V3f( 0, 1, 0 ) ), 1e-4 ) ) r.worldEnd() def testTransformsInDeferredRenderer( self ): r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.transformBegin() r.concatTransform( imath.M44f().rotate( imath.V3f( 1, 1, 1 ) ) ) r.camera( "main", { "resolution" : IECore.V2iData( imath.V2i( 512 ) ), "projection" : IECore.StringData( "perspective" ) } ) r.transformEnd() r.worldBegin() # confirm that the camera transformation is not affecting the world space matrix self.assert_( r.getTransform().equalWithAbsError( imath.M44f(), 1e-4 ) ) r.concatTransform( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ) r.concatTransform( imath.M44f().rotate( imath.V3f(
1, 1, 1 ) ) ) m = r.getTransform() r.transformBegin() if True: # confirm that the transformBegin did not change the current transform self.assert_( r.getTransform().equalWithAbsError( m, 1e-4 ) ) # confirm that concatenate transform works r.concatTransform( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ) self.assert_( r.getTransform().equalWithAbsError( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) * m, 1e-4 ) ) r.concatTransform( imath.M44f().scale( imath.V3f(0.5) ) ) self.assert_( r.getTransform().equalWithAbsError( imath.M44f().scale( imath.V3f(0.5) ) * imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) * m, 1e-4 ) ) # confirm that setting the world space transform works too m2 = imath.M44f().translate( imath.V3f( 0, 1, 0 ) ) r.setTransform( m2 ) self.assert_( r.getTransform().equalWithAbsError( m2, 1e-4 ) ) r.attributeBegin() if True: # confirm that the attributeBegin did not change the current transform self.assert_( r.getTransform().equalWithAbsError( m2, 1e-4 ) ) # confirm that setting the world space transform works too r.setTransform( imath.M44f().rotate( imath.V3f( 3, 1, 0 ) ) ) self.assert_( r.getTransform().equalWithAbsError( imath.M44f().rotate( imath.V3f( 3, 1, 0 ) ), 1e-4 ) ) r.attributeEnd() # confirms that attributeEnd recovers the matrix. self.assert_( r.getTransform().equalWithAbsError( m2, 1e-4 ) ) r.transformEnd() # confirms that transformEnd recovers the matrix. self.assert_( r.getTransform().equalWithAbsError( m, 1e-4 ) ) r.worldEnd() def testInstances(self): r = IECoreGL.Renderer() r.instanceBegin( "instanceA", {} ) r.concatTransform( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ) r.transformBegin() r.concatTransform( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, {} ) r.concatTransform( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, {} ) r.transformEnd() r.concatTransform( imath.M44f().translate( imath.V3f( -1, 0, 0 ) ) ) r.sphere( 1, -1, 1, 360, {} ) r.instanceEnd() r.instanceBegin( "instanceB", {} ) r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, 10 ) ) ) r.instance( "instanceA" ) r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, 20 ) ) ) r.instance( "instanceA" ) r.instanceEnd() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) r.worldBegin() r.concatTransform( imath.M44f().translate( imath.V3f( 0, 5, 0 ) ) ) r.instance( "instanceB" ) r.setTransform( imath.M44f().translate( imath.V3f( 0, 10, 0 ) ) ) r.instance( "instanceB" ) r.worldEnd() g = r.scene().root() self.assertEqual( self.__countChildrenRecursive( g ), 12 ) self.assert_( g.bound().min().equalWithAbsError( imath.V3f( -1, 4, 9 ), 0.001 ) ) self.assert_( g.bound().max().equalWithAbsError( imath.V3f( 4, 11, 31 ), 0.001 ) ) def testCuriousCrashOnThreadedProceduralsAndAttribute( self ): myMesh = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob").read() class MyProc( IECoreScene.Renderer.Procedural ): def __init__( self, level = 0 ): IECoreScene.Renderer.Procedural.__init__( self ) self.__level = level def bound( self ) : return imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) def render( self, renderer ): if self.__level < 2 : for i in xrange( 0, 50 ) : renderer.procedural( MyProc( self.__level + 1 ) ) else: g = IECoreScene.Group() g.addChild( myMesh ) g.addState( IECoreScene.AttributeState( { "name" : IECore.StringData( str(self.__level) ) } ) ) g.render( renderer ) def hash( self ): h = IECore.MurmurHash() return h r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( "deferred" ) ) 
r.worldBegin() p = MyProc() r.procedural( p ) r.worldEnd() def testDepthTest( self ) : def doTest( depthTest, r, g, b ) : renderer = IECoreGL.Renderer() renderer.setOption( "gl:mode", IECore.StringData( "immediate" ) ) renderer.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) renderer.camera( "main", { "projection" : IECore.StringData( "orthographic" ), "resolution" : IECore.V2iData( imath.V2i( 256 ) ), "clippingPlanes" : IECore.V2fData( imath.V2f( 1, 1000 ) ), "screenWindow" : IECore.Box2fData( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) ) } ) renderer.display( os.path.dirname( __file__ ) + "/output/depthTest.tif", "tif", "rgba", {} ) m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) ) with IECoreScene.WorldBlock( renderer ) : renderer.setAttribute( "gl:depthTest", IECore.BoolData( depthTest ) ) renderer.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -1 ) ) ) renderer.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) m.render( renderer ) renderer.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -1 ) ) ) renderer.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ) } ) m.render( renderer ) i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/depthTest.tif" ).read() for p in i["R"] : self.assertEqual( p, r ) for p in i["G"] : self.assertEqual( p, g ) for p in i["B"] : self.assertEqual( p, b ) doTest( True, 1, 0, 0 ) doTest( False, 0, 1, 0 ) def testCameraVisibility( self ) : def doRender( mode, visibility ) : r = IECoreGL.Renderer() r.setOption( "gl:mode", IECore.StringData( mode ) ) r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( "./glsl" ) ) r.camera( "main", { "projection" : IECore.StringData( "perspective" ), "projection:fov" : IECore.FloatData( 20 ), "resolution" : IECore.V2iData( imath.V2i( 256 ) ), "clippingPlanes" : IECore.V2fData( imath.V2f( 1, 1000 ) ), "screenWindow" : IECore.Box2fData( imath.Box2f( imath.V2f( -3 ), imath.V2f( 3 ) ) ) } ) if mode=="immediate" : r.display( os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif", "tif", "rgba", {} ) with IECoreScene.WorldBlock( r ) : r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, -5 ) ) ) r.setAttribute( "gl:visibility:camera", IECore.BoolData( visibility ) ) r.points( 1, { "P" : IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0 ) ] ) ) } ) return r # test immediate renderer by checking images doRender( "immediate", True ) i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif" ).read() self.failUnless( i["A"][256 * 128 + 128] > .99 ) doRender( "immediate", False ) i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif" ).read() self.assertEqual( i["A"], IECore.FloatVectorData( [ 0 ] * 256 * 256 ) ) # test deferred renderer by checking scene r = doRender( "deferred", True ) self.assertEqual( len( r.scene().root().children()[0].children() ), 1 ) r = doRender( "deferred", False ) self.assertEqual( len( r.scene().root().children() ), 0 ) def testWarningMessages( self ): r = IECoreGL.Renderer() r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) ) # gl renderer only supports "surface" shaders, so it should complain about this: c = IECore.CapturingMessageHandler() with c : with IECoreScene.WorldBlock( r ): 
r.shader( "shader", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) self.assertEqual( len( c.messages ), 1 ) self.assertEqual( c.messages[0].level, IECore.Msg.Level.Warning ) # it should just ignore this, because of the "ri:" prefix: c = IECore.CapturingMessageHandler() with c : with IECoreScene.WorldBlock( r ): r.shader( "ri:shader", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) self.assertEqual( len( c.messages ), 0 ) # this should work fine: c = IECore.CapturingMessageHandler() with c : with IECoreScene.WorldBlock( r ): r.shader( "gl:surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) self.assertEqual( len( c.messages ), 0 ) # it should just ignore this, because of the "lg:" prefix: c = IECore.CapturingMessageHandler() with c : with IECoreScene.WorldBlock( r ): r.shader( "lg:shader", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) self.assertEqual( len( c.messages ), 0 ) # this aint right!: c = IECore.CapturingMessageHandler() with c : with IECoreScene.WorldBlock( r ): r.shader( "gl:nonsense", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } ) self.assertEqual( len( c.messages ), 1 ) self.assertEqual( c.messages[0].level, IECore.Msg.Level.Warning ) def setUp( self ) : if not os.path.isdir( "test/IECoreGL/output" ) : os.makedirs( "test/IECoreGL/output" ) def tearDown( self ) : if os.path.isdir( "test/IECoreGL/output" ) : shutil.rmtree( "test/IECoreGL/output" ) if __name__ == "__main__": unittest.main()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy

from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.models.Sequential', 'keras.Sequential')
class Sequential(Model):
  """Linear stack of layers.

  Arguments:
    layers: list of layers to add to the model.

  Example:

  ```python
  # Optionally, the first layer can receive an `input_shape` argument:
  model = Sequential()
  model.add(Dense(32, input_shape=(500,)))
  # Afterwards, we do automatic shape inference:
  model.add(Dense(32))

  # This is identical to the following:
  model = Sequential()
  model.add(Dense(32, input_dim=500))

  # And to the following:
  model = Sequential()
  model.add(Dense(32, batch_input_shape=(None, 500)))

  # Note that you can also omit the `input_shape` argument:
  # In that case the model gets built the first time you call `fit` (or other
  # training and evaluation methods).
  model = Sequential()
  model.add(Dense(32))
  model.add(Dense(32))
  model.compile(optimizer=optimizer, loss=loss)
  # This builds the model for the first time:
  model.fit(x, y, batch_size=32, epochs=10)

  # Note that when using this delayed-build pattern (no input shape specified),
  # the model doesn't have any weights until the first call
  # to a training/evaluation method (since it isn't yet built):
  model = Sequential()
  model.add(Dense(32))
  model.add(Dense(32))
  model.weights  # returns []

  # Whereas if you specify the input shape, the model gets built continuously
  # as you are adding layers:
  model = Sequential()
  model.add(Dense(32, input_shape=(500,)))
  model.add(Dense(32))
  model.weights  # returns list of length 4

  # When using the delayed-build pattern (no input shape specified), you can
  # choose to manually build your model by calling `build(batch_input_shape)`:
  model = Sequential()
  model.add(Dense(32))
  model.add(Dense(32))
  model.build((None, 500))
  model.weights  # returns list of length 4
  ```
  """

  @checkpointable.no_automatic_dependency_tracking
  def __init__(self, layers=None, name=None):
    super(Sequential, self).__init__(name=name)
    self.supports_masking = True
    self._build_input_shape = None
    self._compute_output_and_mask_jointly = True

    self._layer_call_argspecs = {}

    # Add to the model any layers passed to the constructor.
    if layers:
      for layer in layers:
        self.add(layer)

  @property
  def layers(self):
    # Historically, `sequential.layers` only returns layers that were added
    # via `add`, and omits the auto-generated `InputLayer` that comes at the
    # bottom of the stack.
    # `CheckpointableBase` manages the `_layers` attributes and does filtering
    # over it.
    layers = super(Sequential, self).layers
    if layers and isinstance(layers[0], InputLayer):
      return layers[1:]
    return layers[:]

  @property
  def dynamic(self):
    return any(layer.dynamic for layer in self.layers)

  @checkpointable.no_automatic_dependency_tracking
  def add(self, layer):
    """Adds a layer instance on top of the layer stack.

    Arguments:
      layer: layer instance.

    Raises:
      TypeError: If `layer` is not a layer instance.
      ValueError: In case the `layer` argument does not know its input shape.
      ValueError: In case the `layer` argument has multiple output tensors, or
        is already connected somewhere else (forbidden in `Sequential` models).
    """
    if not isinstance(layer, base_layer.Layer):
      raise TypeError('The added layer must be '
                      'an instance of class Layer. '
                      'Found: ' + str(layer))
    self.built = False
    set_inputs = False
    if not self._layers:
      if isinstance(layer, InputLayer):
        # Corner case where the user passes an InputLayer layer via `add`.
        assert len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) == 1
        set_inputs = True
      else:
        batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
        if batch_shape:
          # Instantiate an input layer.
          x = Input(
              batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
          # This will build the current layer
          # and create the node connecting the current layer
          # to the input layer we just created.
          layer(x)
          set_inputs = True

      if set_inputs:
        # If an input layer (placeholder) is available.
        if len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) != 1:
          raise ValueError('All layers in a Sequential model '
                           'should have a single output tensor. '
                           'For multi-output layers, '
                           'use the functional API.')
        self.outputs = [
            nest.flatten(layer._inbound_nodes[-1].output_tensors)[0]
        ]
        self.inputs = layer_utils.get_source_inputs(self.outputs[0])

    elif self.outputs:
      # If the model is being built continuously on top of an input layer:
      # refresh its output.
      output_tensor = layer(self.outputs[0])
      if isinstance(output_tensor, list):
        raise TypeError('All layers in a Sequential model '
                        'should have a single output tensor. '
                        'For multi-output layers, '
                        'use the functional API.')
      self.outputs = [output_tensor]

    if set_inputs or self._is_graph_network:
      self._init_graph_network(self.inputs, self.outputs, name=self.name)
      self.built = True
    else:
      self._layers.append(layer)
    if self._layers:
      self._track_layers(self._layers)

    self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)

  @checkpointable.no_automatic_dependency_tracking
  def pop(self):
    """Removes the last layer in the model.

    Raises:
      TypeError: if there are no layers in the model.
    """
    if not self.layers:
      raise TypeError('There are no layers in the model.')

    layer = self._layers.pop()
    self._layer_call_argspecs.pop(layer)
    if not self.layers:
      self.outputs = None
      self.inputs = None
      self.built = False
    elif self._is_graph_network:
      self.layers[-1]._outbound_nodes = []
      self.outputs = [self.layers[-1].output]
      self._init_graph_network(self.inputs, self.outputs, name=self.name)
      self.built = True

  @base_layer.default
  def build(self, input_shape=None):
    if self._is_graph_network:
      self._init_graph_network(self.inputs, self.outputs, name=self.name)
    else:
      if input_shape is None:
        raise ValueError('You must provide an `input_shape` argument.')
      input_shape = tuple(input_shape)
      self._build_input_shape = input_shape
      super(Sequential, self).build(input_shape)
    self.built = True

  def call(self, inputs, training=None, mask=None):
    if self._is_graph_network:
      return super(Sequential, self).call(inputs, training=training, mask=mask)

    outputs, _ = self._call_and_compute_mask(
        inputs, training=training, mask=mask)
    return outputs

  def _call_and_compute_mask(self, inputs, training=None, mask=None):
    if not self.built and self._is_graph_network:
      self._init_graph_network(self.inputs, self.outputs, name=self.name)

    outputs = inputs  # handle the corner case where self.layers is empty
    for layer in self.layers:
      # During each iteration, `inputs` are the inputs to `layer`, and
      # `outputs` are the outputs of `layer` applied to `inputs`. At the end
      # of each iteration `inputs` is set to `outputs` to prepare for the
      # next layer.
      kwargs = {}
      argspec = self._layer_call_argspecs[layer].args
      if 'mask' in argspec:
        kwargs['mask'] = mask
      if 'training' in argspec:
        kwargs['training'] = training

      if isinstance(layer, Network) and layer._compute_output_and_mask_jointly:
        outputs, mask = layer._call_and_compute_mask(inputs, **kwargs)
      else:
        if not layer.built:
          # Build layer if applicable.
          with ops.name_scope(layer._name_scope()):
            layer._maybe_build(inputs)
          layer.built = True
        if layer.supports_masking:
          mask = layer.compute_mask(inputs, mask)
        else:
          mask = None
        if context.executing_eagerly():
          # __call__ handles activity regularization.
          outputs = layer(inputs, **kwargs)
        elif layer.dynamic:
          outputs = layer._symbolic_call(inputs)
          layer._handle_activity_regularization(inputs, outputs)
        else:
          outputs = layer.call(inputs, **kwargs)
          layer._handle_activity_regularization(inputs, outputs)
        if not context.executing_eagerly():
          outputs._keras_mask = mask

      # `outputs` will be the inputs to the next layer.
      inputs = outputs
    return outputs, mask

  def compute_output_shape(self, input_shape):
    shape = input_shape
    for layer in self.layers:
      shape = layer.compute_output_shape(shape)
    return shape

  def compute_mask(self, inputs, mask):
    _, mask = self._call_and_compute_mask(inputs, mask=mask)
    return mask

  def predict_proba(self, x, batch_size=32, verbose=0):
    """Generates class probability predictions for the input samples.

    The input samples are processed batch by batch.

    Arguments:
      x: input data, as a Numpy array or list of Numpy arrays
        (if the model has multiple inputs).
      batch_size: integer.
      verbose: verbosity mode, 0 or 1.

    Returns:
      A Numpy array of probability predictions.
    """
    preds = self.predict(x, batch_size, verbose)
    if preds.min() < 0. or preds.max() > 1.:
      logging.warning('Network returning invalid probability values. '
                      'The last layer might not normalize predictions '
                      'into probabilities '
                      '(like softmax or sigmoid would).')
    return preds

  def predict_classes(self, x, batch_size=32, verbose=0):
    """Generate class predictions for the input samples.

    The input samples are processed batch by batch.

    Arguments:
      x: input data, as a Numpy array or list of Numpy arrays
        (if the model has multiple inputs).
      batch_size: integer.
      verbose: verbosity mode, 0 or 1.

    Returns:
      A numpy array of class predictions.
    """
    proba = self.predict(x, batch_size=batch_size, verbose=verbose)
    if proba.shape[-1] > 1:
      return proba.argmax(axis=-1)
    else:
      return (proba > 0.5).astype('int32')

  def save(self, filepath, overwrite=True, include_optimizer=True):
    from tensorflow.python.keras.models import save_model  # pylint: disable=g-import-not-at-top
    save_model(self, filepath, overwrite, include_optimizer)

  def get_config(self):
    layer_configs = []
    for layer in self.layers:
      layer_configs.append({
          'class_name': layer.__class__.__name__,
          'config': layer.get_config()
      })
    config = {
        'name': self.name,
        'layers': copy.deepcopy(layer_configs)
    }
    if self._build_input_shape:
      config['build_input_shape'] = self._build_input_shape
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    if 'name' in config:
      name = config['name']
      build_input_shape = config.get('build_input_shape')
      layer_configs = config['layers']
    else:
      name = None
      build_input_shape = None
      layer_configs = config
    model = cls(name=name)
    for layer_config in layer_configs:
      layer = layer_module.deserialize(layer_config,
                                       custom_objects=custom_objects)
      model.add(layer)
    if not model.inputs and build_input_shape:
      model.build(build_input_shape)
    if not model._is_graph_network:
      # Still needs to be built when passed input data.
      model.built = False
    return model

  @property
  def input_spec(self):
    if self.layers and hasattr(self.layers[0], 'input_spec'):
      return self.layers[0].input_spec
    return None
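

def _sequential_config_roundtrip_sketch():
  """Illustrative sketch, not part of the public module.

  Demonstrates the `get_config()`/`from_config()` contract documented above:
  the config describes the architecture only, so the clone has the same
  layers but freshly initialized weights. Assumes `Dense` is importable as
  below; layer sizes are arbitrary.
  """
  from tensorflow.python.keras.layers import Dense  # pylint: disable=g-import-not-at-top
  model = Sequential([Dense(32, input_shape=(500,)), Dense(10)])
  clone = Sequential.from_config(model.get_config())
  assert len(clone.layers) == len(model.layers)
  return clone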
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import functools
import imp
import sys
import threading
import types
import unittest
import weakref

import gast

from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.core import unsupported_features_checker
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.util import tf_inspect


class _ConvertedEntityFactoryInfo(
    collections.namedtuple(
        '_ConvertedEntityFactoryInfo',
        ('module_name', 'converted_name', 'factory_factory_name',
         'source_map'))):
  """Holds metadata about a converted entity stored as a dynamic factory.

  The dynamic factory is assumed to be created by _wrap_into_dynamic_factory,
  be named `factory_factory_name` and located inside the module named as
  `module_name`.

  Attributes:
    module_name: Text, the name of the module containing the entity.
    converted_name: Text, the name of the converted entity.
    factory_factory_name: Text, the name of the dynamic factory.
    source_map: Dict.
  """

  def __str__(self):
    return '_ConvertedEntityFactoryInfo({} in {})'.format(
        self.converted_name, self.module_name)

  def get_module(self):
    return sys.modules[self.module_name]

  def get_factory(self):
    assert self.module_name in sys.modules
    factory_factory = getattr(sys.modules[self.module_name],
                              self.factory_factory_name)
    return factory_factory()


# TODO(mdan): Add a garbage collection hook for cleaning up modules.
class _ConversionCache(object):
  """A hierarchical cache that uses the converted entity as weak key.

  The keys are soft references (i.e. they are discarded when the key is
  destroyed). The subkeys are normal hashable values.

  This class is generic - see the call site for how the keys and values are
  defined.
  """

  def __init__(self):
    self._cache = weakref.WeakKeyDictionary()

  def has(self, key, subkey):
    if key not in self._cache:
      return False
    return subkey in self._cache[key]

  def __getitem__(self, key):
    if key not in self._cache:
      # The bucket needs to be initialized to support this usage:
      #   cache[key][subkey] = value
      self._cache[key] = {}
    return self._cache[key]


# Using a re-entrant lock to guard against the unlikely possibility that the
# conversion process triggers additional code execution.
_CACHE_LOCK = threading.RLock()

_CACHE = _ConversionCache()
_UNCONVERTED_CACHE = _ConversionCache()


# Note: strictly speaking, a simple factory might have been sufficient for
# functions. But the double factory approach allows us to control the closure
# and globals of the converted code in a cleaner fashion.
# TODO(mdan): A simple factory may be sufficient.
def _wrap_into_dynamic_factory(nodes, entity_name, factory_factory_name,
                               factory_name, closure_vars, future_features):
  """Wraps an AST into the body of a dynamic factory.

  This uses the dynamic factory (factory of factory) pattern to achieve the
  following:

  1. The inner factory dynamically creates the entity represented by nodes.
  2. The entity is parametrized by `ag__`, the internal AutoGraph module.
  3. The outer factory creates the inner factory with a lexical scope in
     which `closure_vars` are bound local variables. This in turn allows the
     caller to control the exact closure (i.e. non-global free variables) for
     the inner factory.

  The AST is expected to define some symbol named by `entity_name`.

  Args:
    nodes: ast.AST
    entity_name: Union[Text, ast.AST]
    factory_factory_name: Text
    factory_name: Text
    closure_vars: Iterable[Text]
    future_features: Iterable[Text], see EntityInfo.future_features.

  Returns:
    ast.AST
  """
  if not isinstance(nodes, (list, tuple)):
    nodes = (nodes,)

  dummy_closure_defs = []
  for var_name in closure_vars:
    template = """
      var_name = None
    """
    dummy_closure_defs.extend(templates.replace(template, var_name=var_name))

  if future_features:
    future_imports = gast.ImportFrom(
        module='__future__',
        names=[gast.alias(name=name, asname=None) for name in future_features],
        level=0)
  else:
    future_imports = []

  # These dummy symbol declarations create local variables in a function
  # scope, so that the Python parser correctly marks them as free non-global
  # variables upon load (that is, it creates cell slots for each symbol).
  # Their values are not used, as the cells are swapped with the original
  # entity's cells after the code has been loaded.
  template = """
    future_imports
    def factory_factory_name():
      dummy_closure_defs
      def factory_name(ag__, ag_source_map__, ag_module__):
        entity_defs
        entity_name.ag_source_map = ag_source_map__
        entity_name.ag_module = ag_module__
        entity_name.autograph_info__ = {}
        return entity_name
      return factory_name
  """
  return templates.replace(
      template,
      future_imports=future_imports,
      factory_factory_name=factory_factory_name,
      factory_name=factory_name,
      dummy_closure_defs=dummy_closure_defs,
      entity_defs=nodes,
      entity_name=entity_name)


def _convert_with_cache(entity, program_ctx, free_nonglobal_var_names):
  """Returns a (possibly cached) factory for the converted result of entity."""
  # The cache key is the entity's code object if it defined one, otherwise
  # it's the entity itself. Keying by the code object allows caching of
  # functions that are dynamically created e.g. in a loop.
  if hasattr(entity, '__code__'):
    key = entity.__code__
  else:
    key = entity

  # The cache subkey encompasses any conversion options on which the generated
  # code may depend.
  # The cached factory includes the necessary definitions to distinguish
  # between the global and non-global free variables. For this reason, the
  # cache subkey includes the names of the free non-globals.
  subkey = (program_ctx.options, frozenset(free_nonglobal_var_names))

  with _CACHE_LOCK:
    # The cache values are _ConvertedEntityFactoryInfo objects.
    if _CACHE.has(key, subkey):
      # TODO(mdan): Check whether the module is still loaded.
      converted_entity_info = _CACHE[key][subkey]
      logging.log(3, 'Cache hit for entity %s key %s subkey %s: %s', entity,
                  key, subkey, converted_entity_info)
      return converted_entity_info

    logging.log(1, 'Entity %s is not cached for key %s subkey %s', entity,
                key, subkey)

    nodes, converted_name, entity_info = convert_entity_to_ast(
        entity, program_ctx)

    namer = naming.Namer(entity_info.namespace)
    factory_factory_name = namer.new_symbol('create_converted_entity_factory',
                                            ())
    factory_name = namer.new_symbol('create_converted_entity', ())
    nodes = _wrap_into_dynamic_factory(nodes, converted_name,
                                       factory_factory_name, factory_name,
                                       free_nonglobal_var_names,
                                       entity_info.future_features)

    module, _, source_map = compiler.ast_to_object(
        nodes, include_source_map=True)
    module_name = module.__name__

    converted_entity_info = _ConvertedEntityFactoryInfo(
        module_name=module_name,
        converted_name=converted_name,
        factory_factory_name=factory_factory_name,
        source_map=source_map)
    _CACHE[key][subkey] = converted_entity_info
    return converted_entity_info


def _instantiate(entity, converted_entity_info, free_nonglobal_var_names):
  """Creates a converted instance and binds it to match original entity."""
  factory = converted_entity_info.get_factory()

  # `factory` is currently bound to the empty module it was loaded from.
  # It must instead be bound to the globals and closure from the original
  # entity.
  if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
    entity_globals = entity.__globals__
    entity_closure = entity.__closure__ or ()
  elif hasattr(entity, '__module__'):
    entity_globals = sys.modules[entity.__module__].__dict__
    entity_closure = ()
  assert len(entity_closure) == len(free_nonglobal_var_names)

  # Fit the original entity's cells to match the order of factory's cells.
  original_names_and_cells = dict(zip(free_nonglobal_var_names,
                                      entity_closure))
  new_factory_cells = tuple(
      original_names_and_cells[name]
      for name in factory.__code__.co_freevars)

  bound_factory = types.FunctionType(
      code=factory.__code__,
      globals=entity_globals,
      name=factory.__name__,
      argdefs=(),
      closure=new_factory_cells)

  # Two other free vars: the internal "ag__" module and the source
  # map. These are wired via the parameters of the factory.
  converted_entity = bound_factory(  # pylint:disable=not-callable
      ag_internal, converted_entity_info.source_map,
      converted_entity_info.get_module())

  if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
    # Attach the default argument to the converted function.
    converted_entity.__defaults__ = entity.__defaults__
    if hasattr(entity, '__kwdefaults__'):
      converted_entity.__kwdefaults__ = entity.__kwdefaults__

  return converted_entity


def convert(entity, program_ctx):
  """Converts an entity into an equivalent entity."""

  if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
    if not hasattr(entity, '__code__'):
      raise ValueError('Cannot apply autograph to a function that doesn\'t '
                       'expose a __code__ object. If this is a @tf.function,'
                       ' try passing f.python_function instead.')
    free_nonglobal_var_names = entity.__code__.co_freevars
  else:
    free_nonglobal_var_names = ()

  for i, name in enumerate(free_nonglobal_var_names):
    if (name == 'ag__' and
        entity.__closure__[i].cell_contents is not ag_internal):
      raise ValueError('entity {} uses the reserved symbol "{}"'.format(
          entity, name))
    # TODO(mdan): In extreme cases, other ag__ symbols may also be clobbered.

  converted_entity_info = _convert_with_cache(entity, program_ctx,
                                              free_nonglobal_var_names)

  return _instantiate(entity, converted_entity_info, free_nonglobal_var_names)


# TODO(mdan): allow_namedtuple_subclass should be hardcoded to True.
def is_whitelisted_for_graph(o, check_call_override=True,
                             allow_namedtuple_subclass=False):
  """Checks whether an entity is whitelisted for use in graph mode.

  Examples of whitelisted entities include all members of the tensorflow
  package.

  Args:
    o: A Python entity.
    check_call_override: Reserved for internal use. When set to `False`, it
      disables the rule according to which classes are whitelisted if their
      __call__ method is whitelisted.
    allow_namedtuple_subclass: Reserved for internal use. When `True`,
      namedtuple subclasses are not whitelisted.

  Returns:
    Boolean
  """
  # TODO(b/120224672): Fix this.
  if isinstance(o, functools.partial):
    # tf_inspect.getmodule(functools.partial(...)) otherwise returns None
    # since functools.partial objects do not have a __module__ attribute.
    m = functools
  else:
    m = tf_inspect.getmodule(o)

  # Examples of callables that lack a __module__ property include builtins.
  if hasattr(m, '__name__'):
    for rule in config.CONVERSION_RULES:
      action = rule.get_action(m)
      if action == config.Action.CONVERT:
        logging.log(2, 'Not whitelisted: %s: %s', o, rule)
        return False
      elif action == config.Action.DO_NOT_CONVERT:
        logging.log(2, 'Whitelisted: %s: %s', o, rule)
        return True

  if tf_inspect.isgeneratorfunction(o):
    logging.warn(
        'Entity %s appears to be a generator function. It will not be'
        ' converted by AutoGraph.', o)
    logging.log(2, 'Whitelisted: %s: generator functions are not converted',
                o)
    return True

  if (check_call_override and not tf_inspect.isclass(o) and
      hasattr(o, '__call__')):
    # Callable objects: whitelisted if their __call__ method is.
    # The type check avoids infinite recursion around the __call__ method
    # of function objects.
if (type(o) != type(o.__call__)) and is_whitelisted_for_graph(o.__call__): # pylint: disable=unidiomatic-typecheck logging.log(2, 'Whitelisted: %s: object __call__ whitelisted', o) return True owner_class = None if tf_inspect.ismethod(o): # Methods of whitelisted classes are also whitelisted, even if they are # bound via user subclasses. # # For example, suppose `tf.Foo` has a method called `bar`, and `baz` is # defined as below. `tf.Foo` is whitelisted. Then `baz.bar` is also # whitelisted. # # class Custom(tf.Foo): # pass # # baz = Custom() # # For the example above, if `Custom` did overload `bar`, then it would no # longer be whitelisted. owner_class = inspect_utils.getmethodclass(o) if owner_class is not None: if issubclass(owner_class, unittest.TestCase): logging.log(2, 'Whitelisted: %s: method of TestCase subclass', o) return True owner_class = inspect_utils.getdefiningclass(o, owner_class) if is_whitelisted_for_graph( owner_class, check_call_override=False, allow_namedtuple_subclass=True): logging.log(2, 'Whitelisted: %s: owner is whitelisted %s', o, owner_class) return True if inspect_utils.isnamedtuple(o): # Due to the way they're constructed, namedtuple types cannot be converted # because they don't expose source code. But we assume they are safe for # graph mode since they are just containers. if allow_namedtuple_subclass: if not any(inspect_utils.isnamedtuple(base) for base in o.__bases__): logging.log(2, 'Whitelisted: %s: named tuple', o) return True else: logging.log(2, 'Whitelisted: %s: named tuple or subclass', o) return True logging.log(2, 'Not whitelisted: %s: default rule', o) return False def check_cached_unconverted(entity, options): try: # Catch-all for entities that are unhashable or don't allow weakrefs. return _UNCONVERTED_CACHE.has(entity, options) except TypeError: return False def cache_unconverted(entity, options): try: # Catch-all for entities that are unhashable or don't allow weakrefs. _UNCONVERTED_CACHE[entity][options] = True except TypeError: pass # TODO(mdan): Rename to convert_*_node to avoid confusion with convert. def convert_entity_to_ast(o, program_ctx): """Compile a Python entity into equivalent TensorFlow. Args: o: A Python entity. program_ctx: A ProgramContext object. Returns: A tuple (ast, new_name, namespace): * ast: An AST representing an entity with interface equivalent to `o`, but which when executed it creates TF a graph. * new_name: The symbol name under which the new entity can be found. * namespace: A dict mapping all symbols visible to the converted entity, keyed by their symbol name. Raises: ValueError: if the entity type is not supported. """ logging.log(1, 'Converting %s', o) if tf_inspect.isclass(o): nodes, name, entity_info = convert_class_to_ast(o, program_ctx) elif tf_inspect.isfunction(o): nodes, name, entity_info = convert_func_to_ast(o, program_ctx) elif tf_inspect.ismethod(o): nodes, name, entity_info = convert_func_to_ast(o, program_ctx) elif hasattr(o, '__class__'): # Note: this should only be raised when attempting to convert the object # directly. converted_call should still support it. raise NotImplementedError( 'cannot convert entity "{}": object conversion is not yet' ' supported.'.format(o)) else: raise ValueError( 'Entity "%s" has unsupported type "%s". Only functions and classes are ' 'supported for now.' 
% (o, type(o)))

  if logging.has_verbosity(2):
    logging.log(2, 'Compiled output of %s:\n\n%s\n', o,
                compiler.ast_to_source(nodes))
  if logging.has_verbosity(4):
    for n in nodes:
      logging.log(4, 'Compiled AST of %s:\n\n%s\n\n', o,
                  pretty_printer.fmt(n, color=False))

  return nodes, name, entity_info


def convert_class_to_ast(c, program_ctx):
  """Specialization of `convert_entity_to_ast` for classes."""
  # TODO(mdan): Revisit this altogether. Not sure we still need it.
  converted_members = {}
  method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
  members = tf_inspect.getmembers(c, predicate=method_filter)
  if not members:
    raise ValueError('cannot convert %s: no member methods' % c)

  # TODO(mdan): Don't clobber namespaces for each method in one class namespace.
  # The assumption that one namespace suffices for all methods only holds if
  # all methods were defined in the same module.
  # If, instead, functions are imported from multiple modules and then spliced
  # into the class, then each function has its own globals and __future__
  # imports that need to stay separate.
  # For example, C's methods could both have `global x` statements referring to
  # mod1.x and mod2.x, but using one namespace for C would cause a conflict.
  #   from mod1 import f1
  #   from mod2 import f2
  #   class C(object):
  #     method1 = f1
  #     method2 = f2
  class_namespace = {}
  future_features = None
  for _, m in members:
    # Only convert the members that are directly defined by the class.
    if inspect_utils.getdefiningclass(m, c) is not c:
      continue
    (node,), _, entity_info = convert_func_to_ast(
        m, program_ctx=program_ctx, do_rename=False)
    class_namespace.update(entity_info.namespace)
    converted_members[m] = node

    # TODO(mdan): Similarly check the globals.
    if future_features is None:
      future_features = entity_info.future_features
    elif frozenset(future_features) ^ frozenset(entity_info.future_features):
      # Note: we can support this case if ever needed.
      raise ValueError(
          'cannot convert {}: it has methods built with mismatched future'
          ' features: {} and {}'.format(c, future_features,
                                        entity_info.future_features))
  namer = naming.Namer(class_namespace)
  class_name = namer.class_name(c.__name__)

  # Process any base classes: if the superclass is of a whitelisted type, an
  # absolute import line is generated.
  output_nodes = []
  renames = {}
  base_names = []
  for base in c.__bases__:
    if isinstance(object, base):
      base_names.append('object')
      continue
    if is_whitelisted_for_graph(base):
      alias = namer.new_symbol(base.__name__, ())
      output_nodes.append(
          gast.ImportFrom(
              module=base.__module__,
              names=[gast.alias(name=base.__name__, asname=alias)],
              level=0))
    else:
      raise NotImplementedError(
          'Conversion of classes that do not directly extend classes from'
          ' whitelisted modules is temporarily suspended. If this breaks'
          ' existing code please notify the AutoGraph team immediately.')
    base_names.append(alias)
    renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)

  # Generate the definition of the converted class.
  bases = [gast.Name(n, gast.Load(), None) for n in base_names]
  class_def = gast.ClassDef(
      class_name,
      bases=bases,
      keywords=[],
      body=list(converted_members.values()),
      decorator_list=[])
  # Make a final pass to replace references to the class or its base classes.
  # Most commonly, this occurs when making super().__init__() calls.
  # TODO(mdan): Making direct references to superclass' superclass will fail.
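  # For illustration (hypothetical names): when converting
  #   class MyModel(tf.Foo): ...
  # the loop above emits "from <tf.Foo's module> import Foo as Foo_1", and the
  # rename pass below rewrites MyModel to the converted class name and Foo to
  # Foo_1, keeping super().__init__() and other self-references consistent.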
class_def = qual_names.resolve(class_def) renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name) class_def = ast_util.rename_symbols(class_def, renames) output_nodes.append(class_def) # TODO(mdan): Find a way better than forging this object. entity_info = transformer.EntityInfo( source_code=None, source_file=None, future_features=future_features, namespace=class_namespace) return output_nodes, class_name, entity_info def _add_reserved_symbol(namespace, name, entity): if name not in namespace: namespace[name] = entity elif namespace[name] != entity: raise ValueError('The name "%s" is reserved and may not be used.' % name) ag_internal = None # TODO(mdan): Move into core or replace with an actual importable module. def _add_self_references(namespace, autograph_module): """Adds namespace references to the module that exposes the api itself.""" global ag_internal if ag_internal is None: # Craft a module that exposes parts of the external API as well as certain # internal modules. ag_internal = imp.new_module('autograph') ag_internal.__dict__.update(autograph_module.__dict__) ag_internal.ConversionOptions = converter.ConversionOptions ag_internal.STD = converter.STANDARD_OPTIONS ag_internal.Feature = converter.Feature ag_internal.utils = utils ag_internal.FunctionScope = function_wrappers.FunctionScope ag_internal.with_function_scope = function_wrappers.with_function_scope # TODO(mdan): Add safeguards against name clashes. # We don't want to create a submodule because we want the operators to be # accessible as ag__.<operator> ag_internal.__dict__.update(special_functions.__dict__) ag_internal.__dict__.update(operators.__dict__) _add_reserved_symbol(namespace, 'ag__', ag_internal) def convert_func_to_ast(f, program_ctx, do_rename=True): """Specialization of `convert_entity_to_ast` for callable functions.""" future_features = inspect_utils.getfutureimports(f) node, source = parser.parse_entity(f, future_features=future_features) logging.log(3, 'Source code of %s:\n\n%s\n', f, source) # Parsed AST should contain future imports and one function def node. # In general, the output of inspect.getsource is inexact for lambdas because # it uses regex matching to adjust the exact location around the line number # that CPython records. Then, the entire containing line is returned, which # we may have trouble disambiguating. For example: # x, y = lambda: 1, lambda: 2 if f.__name__ == '<lambda>': nodes = ast_util.find_matching_definitions(node, f) if len(nodes) != 1: raise ValueError( 'Unable to identify source code of lambda function {}. It was' ' defined on this line: {}, which must contain a single lambda with' ' matching signature. To avoid ambiguity, define each lambda' ' in a separate expression.'.format(f, source)) node, = nodes # TODO(znado): Place inside standard_analysis. 
origin_info.resolve_entity(node, source, f) namespace = inspect_utils.getnamespace(f) _add_self_references(namespace, program_ctx.autograph_module) namer = naming.Namer(namespace) if isinstance(node, gast.Lambda): new_name = namer.new_symbol('tf__lambda', ()) elif do_rename: new_name = namer.function_name(f.__name__) else: new_name = f.__name__ entity_info = transformer.EntityInfo( source_code=source, source_file='<fragment>', future_features=future_features, namespace=namespace) context = converter.EntityContext(namer, entity_info, program_ctx, new_name) node = node_to_graph(node, context) if isinstance(node, gast.Lambda): node = gast.Assign( targets=[gast.Name(new_name, gast.Store(), None)], value=node) elif do_rename: node.name = new_name else: assert node.name == new_name return (node,), new_name, entity_info def node_to_graph(node, context): """Convert Python code to equivalent TF graph mode code. Args: node: AST, the code to convert. context: converter.EntityContext Returns: A tuple (node, deps): * node: A Python ast node, representing the converted code. * deps: A set of strings, the fully qualified names of entity dependencies that this node has. """ # TODO(mdan): Insert list_comprehensions somewhere. unsupported_features_checker.verify(node) node = converter.standard_analysis(node, context, is_initial=True) node = converter.apply_(node, context, function_scopes) node = converter.apply_(node, context, arg_defaults) node = converter.apply_(node, context, directives) node = converter.apply_(node, context, break_statements) if context.program.options.uses(converter.Feature.ASSERT_STATEMENTS): node = converter.apply_(node, context, asserts) # Note: sequencing continue canonicalization before for loop one avoids # dealing with the extra loop increment operation that the for # canonicalization creates. node = converter.apply_(node, context, continue_statements) node = converter.apply_(node, context, return_statements) if context.program.options.uses(converter.Feature.LISTS): node = converter.apply_(node, context, lists) node = converter.apply_(node, context, slices) node = converter.apply_(node, context, call_trees) node = converter.apply_(node, context, control_flow) node = converter.apply_(node, context, conditional_expressions) node = converter.apply_(node, context, logical_expressions) return node
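

if __name__ == '__main__':
  # A minimal, self-contained sketch of the double-factory trick used by
  # _wrap_into_dynamic_factory and _instantiate above. Illustration only, not
  # part of the AutoGraph API; all names below are hypothetical.
  import types as _types

  _src = ('def create_factory():\n'
          '  x = None  # dummy closure def: only creates a cell slot for x\n'
          '  def create_entity():\n'
          '    def converted():\n'
          '      return x + 1\n'
          '    return converted\n'
          '  return create_entity\n')
  _module_ns = {}
  exec(compile(_src, '<demo>', 'exec'), _module_ns)  # load the factory module

  def _make_original():
    x = 41
    def original():
      return x
    return original

  _original = _make_original()
  _factory = _module_ns['create_factory']()
  # Re-pair the original entity's cells with the factory's free variable
  # names, then build a function bound to those cells (cf. _instantiate).
  _cells = dict(zip(_original.__code__.co_freevars, _original.__closure__))
  _rebound = _types.FunctionType(
      code=_factory.__code__,
      globals=_module_ns,
      name=_factory.__name__,
      argdefs=(),
      closure=tuple(_cells[n] for n in _factory.__code__.co_freevars))
  # The converted entity now sees the original's closure variable: 41 + 1.
  assert _rebound()() == 42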
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import netaddr
import re

from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils


LOG = logging.getLogger(__name__)

ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'

# Used by range check to indicate no limit for a bound.
UNLIMITED = None


def _verify_dict_keys(expected_keys, target_dict, strict=True):
    """Verifies that a dictionary contains the expected keys.

    :param expected_keys: A list of keys expected to be present.
    :param target_dict: The dictionary which should be verified.
    :param strict: Specifies whether additional keys are allowed to be present.
    :return: None if the keys match the specification, otherwise an error
        message.
    """
    if not isinstance(target_dict, dict):
        msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
                 "with keys: %(expected_keys)s") %
               {'target_dict': target_dict, 'expected_keys': expected_keys})
        return msg

    expected_keys = set(expected_keys)
    provided_keys = set(target_dict.keys())

    predicate = expected_keys.__eq__ if strict else expected_keys.issubset

    if not predicate(provided_keys):
        msg = (_("Validation of dictionary's keys failed. "
                 "Expected keys: %(expected_keys)s "
                 "Provided keys: %(provided_keys)s") %
               {'expected_keys': expected_keys,
                'provided_keys': provided_keys})
        return msg


def is_attr_set(attribute):
    return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)


def _validate_values(data, valid_values=None):
    if data not in valid_values:
        msg = (_("'%(data)s' is not in %(valid_values)s") %
               {'data': data, 'valid_values': valid_values})
        LOG.debug(msg)
        return msg


def _validate_not_empty_string_or_none(data, max_len=None):
    if data is not None:
        return _validate_not_empty_string(data, max_len=max_len)


def _validate_not_empty_string(data, max_len=None):
    msg = _validate_string(data, max_len=max_len)
    if msg:
        return msg
    if not data.strip():
        return _("'%s': blank strings are not permitted") % data


def _validate_string_or_none(data, max_len=None):
    if data is not None:
        return _validate_string(data, max_len=max_len)


def _validate_string(data, max_len=None):
    if not isinstance(data, basestring):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg
    if max_len is not None and len(data) > max_len:
        msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
               {'data': data, 'max_len': max_len})
        LOG.debug(msg)
        return msg


def _validate_boolean(data, valid_values=None):
    try:
        convert_to_boolean(data)
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg


def _validate_range(data, valid_values=None):
    """Check that integer value is within a range provided.

    Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matter.
It is expected that the limits provided are valid integers or the value None. """ min_value = valid_values[0] max_value = valid_values[1] try: data = int(data) except (ValueError, TypeError): msg = _("'%s' is not an integer") % data LOG.debug(msg) return msg if min_value is not UNLIMITED and data < min_value: msg = _("'%(data)s' is too small - must be at least " "'%(limit)d'") % {'data': data, 'limit': min_value} LOG.debug(msg) return msg if max_value is not UNLIMITED and data > max_value: msg = _("'%(data)s' is too large - must be no larger than " "'%(limit)d'") % {'data': data, 'limit': max_value} LOG.debug(msg) return msg def _validate_no_whitespace(data): """Validates that input has no whitespace.""" if len(data.split()) > 1: msg = _("'%s' contains whitespace") % data LOG.debug(msg) raise n_exc.InvalidInput(error_message=msg) return data def _validate_mac_address(data, valid_values=None): valid_mac = False try: valid_mac = netaddr.valid_mac(_validate_no_whitespace(data)) except Exception: pass finally: # TODO(arosen): The code in this file should be refactored # so it catches the correct exceptions. _validate_no_whitespace # raises AttributeError if data is None. if valid_mac is False: msg = _("'%s' is not a valid MAC address") % data LOG.debug(msg) return msg def _validate_mac_address_or_none(data, valid_values=None): if data is None: return return _validate_mac_address(data, valid_values) def _validate_ip_address(data, valid_values=None): try: netaddr.IPAddress(_validate_no_whitespace(data)) except Exception: msg = _("'%s' is not a valid IP address") % data LOG.debug(msg) return msg def _validate_ip_pools(data, valid_values=None): """Validate that start and end IP addresses are present. In addition to this the IP addresses will also be validated """ if not isinstance(data, list): msg = _("Invalid data format for IP pool: '%s'") % data LOG.debug(msg) return msg expected_keys = ['start', 'end'] for ip_pool in data: msg = _verify_dict_keys(expected_keys, ip_pool) if msg: LOG.debug(msg) return msg for k in expected_keys: msg = _validate_ip_address(ip_pool[k]) if msg: LOG.debug(msg) return msg def _validate_fixed_ips(data, valid_values=None): if not isinstance(data, list): msg = _("Invalid data format for fixed IP: '%s'") % data LOG.debug(msg) return msg ips = [] for fixed_ip in data: if not isinstance(fixed_ip, dict): msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip LOG.debug(msg) return msg if 'ip_address' in fixed_ip: # Ensure that duplicate entries are not set - just checking IP # suffices. Duplicate subnet_id's are legitimate. 
fixed_ip_address = fixed_ip['ip_address'] if fixed_ip_address in ips: msg = _("Duplicate IP address '%s'") % fixed_ip_address else: msg = _validate_ip_address(fixed_ip_address) if msg: LOG.debug(msg) return msg ips.append(fixed_ip_address) if 'subnet_id' in fixed_ip: msg = _validate_uuid(fixed_ip['subnet_id']) if msg: LOG.debug(msg) return msg def _validate_nameservers(data, valid_values=None): if not hasattr(data, '__iter__'): msg = _("Invalid data format for nameserver: '%s'") % data LOG.debug(msg) return msg ips = [] for ip in data: msg = _validate_ip_address(ip) if msg: # This may be a hostname msg = _validate_regex(ip, HOSTNAME_PATTERN) if msg: msg = _("'%s' is not a valid nameserver") % ip LOG.debug(msg) return msg if ip in ips: msg = _("Duplicate nameserver '%s'") % ip LOG.debug(msg) return msg ips.append(ip) def _validate_hostroutes(data, valid_values=None): if not isinstance(data, list): msg = _("Invalid data format for hostroute: '%s'") % data LOG.debug(msg) return msg expected_keys = ['destination', 'nexthop'] hostroutes = [] for hostroute in data: msg = _verify_dict_keys(expected_keys, hostroute) if msg: LOG.debug(msg) return msg msg = _validate_subnet(hostroute['destination']) if msg: LOG.debug(msg) return msg msg = _validate_ip_address(hostroute['nexthop']) if msg: LOG.debug(msg) return msg if hostroute in hostroutes: msg = _("Duplicate hostroute '%s'") % hostroute LOG.debug(msg) return msg hostroutes.append(hostroute) def _validate_ip_address_or_none(data, valid_values=None): if data is None: return None return _validate_ip_address(data, valid_values) def _validate_subnet(data, valid_values=None): msg = None try: net = netaddr.IPNetwork(_validate_no_whitespace(data)) if '/' not in data: msg = _("'%(data)s' isn't a recognized IP subnet cidr," " '%(cidr)s' is recommended") % {"data": data, "cidr": net.cidr} else: return except Exception: msg = _("'%s' is not a valid IP subnet") % data if msg: LOG.debug(msg) return msg def _validate_subnet_list(data, valid_values=None): if not isinstance(data, list): msg = _("'%s' is not a list") % data LOG.debug(msg) return msg if len(set(data)) != len(data): msg = _("Duplicate items in the list: '%s'") % ', '.join(data) LOG.debug(msg) return msg for item in data: msg = _validate_subnet(item) if msg: return msg def _validate_subnet_or_none(data, valid_values=None): if data is None: return return _validate_subnet(data, valid_values) def _validate_regex(data, valid_values=None): try: if re.match(valid_values, data): return except TypeError: pass msg = _("'%s' is not a valid input") % data LOG.debug(msg) return msg def _validate_regex_or_none(data, valid_values=None): if data is None: return return _validate_regex(data, valid_values) def _validate_uuid(data, valid_values=None): if not uuidutils.is_uuid_like(data): msg = _("'%s' is not a valid UUID") % data LOG.debug(msg) return msg def _validate_uuid_or_none(data, valid_values=None): if data is not None: return _validate_uuid(data) def _validate_uuid_list(data, valid_values=None): if not isinstance(data, list): msg = _("'%s' is not a list") % data LOG.debug(msg) return msg for item in data: msg = _validate_uuid(item) if msg: LOG.debug(msg) return msg if len(set(data)) != len(data): msg = _("Duplicate items in the list: '%s'") % ', '.join(data) LOG.debug(msg) return msg def _validate_dict_item(key, key_validator, data): # Find conversion function, if any, and apply it conv_func = key_validator.get('convert_to') if conv_func: data[key] = conv_func(data.get(key)) # Find validator function # 
TODO(salv-orlando): Structure of dict attributes should be improved # to avoid iterating over items val_func = val_params = None for (k, v) in key_validator.iteritems(): if k.startswith('type:'): # ask forgiveness, not permission try: val_func = validators[k] except KeyError: return _("Validator '%s' does not exist.") % k val_params = v break # Process validation if val_func: return val_func(data.get(key), val_params) def _validate_dict(data, key_specs=None): if not isinstance(data, dict): msg = _("'%s' is not a dictionary") % data LOG.debug(msg) return msg # Do not perform any further validation, if no constraints are supplied if not key_specs: return # Check whether all required keys are present required_keys = [key for key, spec in key_specs.iteritems() if spec.get('required')] if required_keys: msg = _verify_dict_keys(required_keys, data, False) if msg: LOG.debug(msg) return msg # Perform validation and conversion of all values # according to the specifications. for key, key_validator in [(k, v) for k, v in key_specs.iteritems() if k in data]: msg = _validate_dict_item(key, key_validator, data) if msg: LOG.debug(msg) return msg def _validate_dict_or_none(data, key_specs=None): if data is not None: return _validate_dict(data, key_specs) def _validate_dict_or_empty(data, key_specs=None): if data != {}: return _validate_dict(data, key_specs) def _validate_dict_or_nodata(data, key_specs=None): if data: return _validate_dict(data, key_specs) def _validate_non_negative(data, valid_values=None): try: data = int(data) except (ValueError, TypeError): msg = _("'%s' is not an integer") % data LOG.debug(msg) return msg if data < 0: msg = _("'%s' should be non-negative") % data LOG.debug(msg) return msg def convert_to_boolean(data): if isinstance(data, basestring): val = data.lower() if val == "true" or val == "1": return True if val == "false" or val == "0": return False elif isinstance(data, bool): return data elif isinstance(data, int): if data == 0: return False elif data == 1: return True msg = _("'%s' cannot be converted to boolean") % data raise n_exc.InvalidInput(error_message=msg) def convert_to_int(data): try: return int(data) except (ValueError, TypeError): msg = _("'%s' is not a integer") % data raise n_exc.InvalidInput(error_message=msg) def convert_kvp_str_to_list(data): """Convert a value of the form 'key=value' to ['key', 'value']. :raises: n_exc.InvalidInput if any of the strings are malformed (e.g. do not contain a key). """ kvp = [x.strip() for x in data.split('=', 1)] if len(kvp) == 2 and kvp[0]: return kvp msg = _("'%s' is not of the form <key>=[value]") % data raise n_exc.InvalidInput(error_message=msg) def convert_kvp_list_to_dict(kvp_list): """Convert a list of 'key=value' strings to a dict. :raises: n_exc.InvalidInput if any of the strings are malformed (e.g. do not contain a key) or if any of the keys appear more than once. """ if kvp_list == ['True']: # No values were provided (i.e. 
'--flag-name') return {} kvp_map = {} for kvp_str in kvp_list: key, value = convert_kvp_str_to_list(kvp_str) kvp_map.setdefault(key, set()) kvp_map[key].add(value) return dict((x, list(y)) for x, y in kvp_map.iteritems()) def convert_none_to_empty_list(value): return [] if value is None else value def convert_none_to_empty_dict(value): return {} if value is None else value def convert_to_list(data): if data is None: return [] elif hasattr(data, '__iter__'): return list(data) else: return [data] HOSTNAME_PATTERN = ("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]" "{1,63}(?<!-)\.?)+(?:[a-zA-Z]{2,})$)") HEX_ELEM = '[0-9A-Fa-f]' UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}', HEX_ELEM + '{4}', HEX_ELEM + '{4}', HEX_ELEM + '{12}']) # Note: In order to ensure that the MAC address is unicast the first byte # must be even. MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM) # Dictionary that maintains a list of validation functions validators = {'type:dict': _validate_dict, 'type:dict_or_none': _validate_dict_or_none, 'type:dict_or_empty': _validate_dict_or_empty, 'type:dict_or_nodata': _validate_dict_or_nodata, 'type:fixed_ips': _validate_fixed_ips, 'type:hostroutes': _validate_hostroutes, 'type:ip_address': _validate_ip_address, 'type:ip_address_or_none': _validate_ip_address_or_none, 'type:ip_pools': _validate_ip_pools, 'type:mac_address': _validate_mac_address, 'type:mac_address_or_none': _validate_mac_address_or_none, 'type:nameservers': _validate_nameservers, 'type:non_negative': _validate_non_negative, 'type:range': _validate_range, 'type:regex': _validate_regex, 'type:regex_or_none': _validate_regex_or_none, 'type:string': _validate_string, 'type:string_or_none': _validate_string_or_none, 'type:not_empty_string': _validate_not_empty_string, 'type:not_empty_string_or_none': _validate_not_empty_string_or_none, 'type:subnet': _validate_subnet, 'type:subnet_list': _validate_subnet_list, 'type:subnet_or_none': _validate_subnet_or_none, 'type:uuid': _validate_uuid, 'type:uuid_or_none': _validate_uuid_or_none, 'type:uuid_list': _validate_uuid_list, 'type:values': _validate_values, 'type:boolean': _validate_boolean} # Define constants for base resource name NETWORK = 'network' NETWORKS = '%ss' % NETWORK PORT = 'port' PORTS = '%ss' % PORT SUBNET = 'subnet' SUBNETS = '%ss' % SUBNET # Note: a default of ATTR_NOT_SPECIFIED indicates that an # attribute is not required, but will be generated by the plugin # if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED # is different from an attribute that has been specified with a value of # None. For example, if 'gateway_ip' is omitted in a request to # create a subnet, the plugin will receive ATTR_NOT_SPECIFIED # and the default gateway_ip will be generated. # However, if gateway_ip is specified as None, this means that # the subnet does not have a gateway IP. # The following is a short reference for understanding attribute info: # default: default value of the attribute (if missing, the attribute # becomes mandatory. # allow_post: the attribute can be used on POST requests. # allow_put: the attribute can be used on PUT requests. # validate: specifies rules for validating data in the attribute. # convert_to: transformation to apply to the value before it is returned # is_visible: the attribute is returned in GET responses. # required_by_policy: the attribute is required by the policy engine and # should therefore be filled by the API layer even if not present in # request body. 
# enforce_policy: the attribute is actively part of the policy enforcing # mechanism, ie: there might be rules which refer to this attribute. RESOURCE_ATTRIBUTE_MAP = { NETWORKS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'subnets': {'allow_post': False, 'allow_put': False, 'default': [], 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, SHARED: {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': convert_to_boolean, 'is_visible': True, 'required_by_policy': True, 'enforce_policy': True}, }, PORTS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'default': '', 'validate': {'type:string': None}, 'is_visible': True}, 'network_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:uuid': None}, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': convert_to_boolean, 'is_visible': True}, 'mac_address': {'allow_post': True, 'allow_put': False, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:mac_address': None}, 'enforce_policy': True, 'is_visible': True}, 'fixed_ips': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'convert_list_to': convert_kvp_list_to_dict, 'validate': {'type:fixed_ips': None}, 'enforce_policy': True, 'is_visible': True}, 'device_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'device_owner': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, SUBNETS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'default': '', 'validate': {'type:string': None}, 'is_visible': True}, 'ip_version': {'allow_post': True, 'allow_put': False, 'convert_to': convert_to_int, 'validate': {'type:values': [4, 6]}, 'is_visible': True}, 'network_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:uuid': None}, 'is_visible': True}, 'cidr': {'allow_post': True, 'allow_put': False, 'validate': {'type:subnet': None}, 'is_visible': True}, 'gateway_ip': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True}, 'allocation_pools': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:ip_pools': None}, 'is_visible': True}, 'dns_nameservers': {'allow_post': True, 'allow_put': True, 'convert_to': convert_none_to_empty_list, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:nameservers': None}, 'is_visible': True}, 'host_routes': 
{'allow_post': True, 'allow_put': True, 'convert_to': convert_none_to_empty_list, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:hostroutes': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'enable_dhcp': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': convert_to_boolean, 'is_visible': True}, 'ipv6_ra_mode': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:values': constants.IPV6_MODES}, 'is_visible': True}, 'ipv6_address_mode': {'allow_post': True, 'allow_put': True, 'default': ATTR_NOT_SPECIFIED, 'validate': {'type:values': constants.IPV6_MODES}, 'is_visible': True}, SHARED: {'allow_post': False, 'allow_put': False, 'default': False, 'convert_to': convert_to_boolean, 'is_visible': False, 'required_by_policy': True, 'enforce_policy': True}, } } # Identify the attribute used by a resource to reference another resource RESOURCE_FOREIGN_KEYS = { NETWORKS: 'network_id' } PLURALS = {NETWORKS: NETWORK, PORTS: PORT, SUBNETS: SUBNET, 'dns_nameservers': 'dns_nameserver', 'host_routes': 'host_route', 'allocation_pools': 'allocation_pool', 'fixed_ips': 'fixed_ip', 'extensions': 'extension'} EXT_NSES = {} # Namespaces to be added for backward compatibility # when existing extended resource attributes are # provided by other extension than original one. EXT_NSES_BC = {} def get_attr_metadata(): return {'plurals': PLURALS, 'xmlns': constants.XML_NS_V20, constants.EXT_NS: EXT_NSES, constants.EXT_NS_COMP: EXT_NSES_BC}
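

def _demo_validators_usage():
    """A minimal usage sketch, illustration only (not part of this module's
    public API; this function name is hypothetical). Validators are looked up
    by their 'type:' key and return None on success or a translated error
    message on failure; the convert_* helpers raise InvalidInput instead.
    """
    # 4 is an allowed ip_version value, so no message is returned.
    assert validators['type:values'](4, [4, 6]) is None
    # An empty list trivially satisfies the uuid_list validator.
    assert validators['type:uuid_list']([], None) is None
    # Converters normalize their input or raise n_exc.InvalidInput.
    assert convert_to_boolean('true') is True
    assert convert_kvp_str_to_list('key=value') == ['key', 'value']
    # On failure a validator returns a message rather than raising, e.g.
    # _validate_non_negative('-1') yields "'-1' should be non-negative".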
# fileserverclient.py - client for communicating with the cache process # # Copyright 2013 Facebook, Inc. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import io import os import threading import time import zlib from mercurial.i18n import _ from mercurial.node import bin, hex from mercurial import ( error, pycompat, revlog, sshpeer, util, wireprotov1peer, ) from mercurial.utils import ( hashutil, procutil, ) from . import ( constants, contentstore, metadatastore, ) _sshv1peer = sshpeer.sshv1peer # Statistics for debugging fetchcost = 0 fetches = 0 fetched = 0 fetchmisses = 0 _lfsmod = None def getcachekey(reponame, file, id): pathhash = hex(hashutil.sha1(file).digest()) return os.path.join(reponame, pathhash[:2], pathhash[2:], id) def getlocalkey(file, id): pathhash = hex(hashutil.sha1(file).digest()) return os.path.join(pathhash, id) def peersetup(ui, peer): class remotefilepeer(peer.__class__): @wireprotov1peer.batchable def x_rfl_getfile(self, file, node): if not self.capable(b'x_rfl_getfile'): raise error.Abort( b'configured remotefile server does not support getfile' ) f = wireprotov1peer.future() yield {b'file': file, b'node': node}, f code, data = f.value.split(b'\0', 1) if int(code): raise error.LookupError(file, node, data) yield data @wireprotov1peer.batchable def x_rfl_getflogheads(self, path): if not self.capable(b'x_rfl_getflogheads'): raise error.Abort( b'configured remotefile server does not ' b'support getflogheads' ) f = wireprotov1peer.future() yield {b'path': path}, f heads = f.value.split(b'\n') if f.value else [] yield heads def _updatecallstreamopts(self, command, opts): if command != b'getbundle': return if ( constants.NETWORK_CAP_LEGACY_SSH_GETFILES not in self.capabilities() ): return if not util.safehasattr(self, '_localrepo'): return if ( constants.SHALLOWREPO_REQUIREMENT not in self._localrepo.requirements ): return bundlecaps = opts.get(b'bundlecaps') if bundlecaps: bundlecaps = [bundlecaps] else: bundlecaps = [] # shallow, includepattern, and excludepattern are a hacky way of # carrying over data from the local repo to this getbundle # command. We need to do it this way because bundle1 getbundle # doesn't provide any other place we can hook in to manipulate # getbundle args before it goes across the wire. Once we get rid # of bundle1, we can use bundle2's _pullbundle2extraprepare to # do this more cleanly. bundlecaps.append(constants.BUNDLE2_CAPABLITY) if self._localrepo.includepattern: patterns = b'\0'.join(self._localrepo.includepattern) includecap = b"includepattern=" + patterns bundlecaps.append(includecap) if self._localrepo.excludepattern: patterns = b'\0'.join(self._localrepo.excludepattern) excludecap = b"excludepattern=" + patterns bundlecaps.append(excludecap) opts[b'bundlecaps'] = b','.join(bundlecaps) def _sendrequest(self, command, args, **opts): self._updatecallstreamopts(command, args) return super(remotefilepeer, self)._sendrequest( command, args, **opts ) def _callstream(self, command, **opts): supertype = super(remotefilepeer, self) if not util.safehasattr(supertype, '_sendrequest'): self._updatecallstreamopts(command, pycompat.byteskwargs(opts)) return super(remotefilepeer, self)._callstream(command, **opts) peer.__class__ = remotefilepeer class cacheconnection(object): """The connection for communicating with the remote cache. 
Performs gets and sets by communicating with an external process that has the cache-specific implementation. """ def __init__(self): self.pipeo = self.pipei = self.pipee = None self.subprocess = None self.connected = False def connect(self, cachecommand): if self.pipeo: raise error.Abort(_(b"cache connection already open")) self.pipei, self.pipeo, self.pipee, self.subprocess = procutil.popen4( cachecommand ) self.connected = True def close(self): def tryclose(pipe): try: pipe.close() except Exception: pass if self.connected: try: self.pipei.write(b"exit\n") except Exception: pass tryclose(self.pipei) self.pipei = None tryclose(self.pipeo) self.pipeo = None tryclose(self.pipee) self.pipee = None try: # Wait for process to terminate, making sure to avoid deadlock. # See https://docs.python.org/2/library/subprocess.html for # warnings about wait() and deadlocking. self.subprocess.communicate() except Exception: pass self.subprocess = None self.connected = False def request(self, request, flush=True): if self.connected: try: self.pipei.write(request) if flush: self.pipei.flush() except IOError: self.close() def receiveline(self): if not self.connected: return None try: result = self.pipeo.readline()[:-1] if not result: self.close() except IOError: self.close() return result def _getfilesbatch( remote, receivemissing, progresstick, missed, idmap, batchsize ): # Over http(s), iterbatch is a streamy method and we can start # looking at results early. This means we send one (potentially # large) request, but then we show nice progress as we process # file results, rather than showing chunks of $batchsize in # progress. # # Over ssh, iterbatch isn't streamy because batch() wasn't # explicitly designed as a streaming method. In the future we # should probably introduce a streambatch() method upstream and # use that for this. 
with remote.commandexecutor() as e: futures = [] for m in missed: futures.append( e.callcommand( b'x_rfl_getfile', {b'file': idmap[m], b'node': m[-40:]} ) ) for i, m in enumerate(missed): r = futures[i].result() futures[i] = None # release memory file_ = idmap[m] node = m[-40:] receivemissing(io.BytesIO(b'%d\n%s' % (len(r), r)), file_, node) progresstick() def _getfiles_optimistic( remote, receivemissing, progresstick, missed, idmap, step ): remote._callstream(b"x_rfl_getfiles") i = 0 pipeo = remote._pipeo pipei = remote._pipei while i < len(missed): # issue a batch of requests start = i end = min(len(missed), start + step) i = end for missingid in missed[start:end]: # issue new request versionid = missingid[-40:] file = idmap[missingid] sshrequest = b"%s%s\n" % (versionid, file) pipeo.write(sshrequest) pipeo.flush() # receive batch results for missingid in missed[start:end]: versionid = missingid[-40:] file = idmap[missingid] receivemissing(pipei, file, versionid) progresstick() # End the command pipeo.write(b'\n') pipeo.flush() def _getfiles_threaded( remote, receivemissing, progresstick, missed, idmap, step ): remote._callstream(b"x_rfl_getfiles") pipeo = remote._pipeo pipei = remote._pipei def writer(): for missingid in missed: versionid = missingid[-40:] file = idmap[missingid] sshrequest = b"%s%s\n" % (versionid, file) pipeo.write(sshrequest) pipeo.flush() writerthread = threading.Thread(target=writer) writerthread.daemon = True writerthread.start() for missingid in missed: versionid = missingid[-40:] file = idmap[missingid] receivemissing(pipei, file, versionid) progresstick() writerthread.join() # End the command pipeo.write(b'\n') pipeo.flush() class fileserverclient(object): """A client for requesting files from the remote file server.""" def __init__(self, repo): ui = repo.ui self.repo = repo self.ui = ui self.cacheprocess = ui.config(b"remotefilelog", b"cacheprocess") if self.cacheprocess: self.cacheprocess = util.expandpath(self.cacheprocess) # This option causes remotefilelog to pass the full file path to the # cacheprocess instead of a hashed key. self.cacheprocesspasspath = ui.configbool( b"remotefilelog", b"cacheprocess.includepath" ) self.debugoutput = ui.configbool(b"remotefilelog", b"debug") self.remotecache = cacheconnection() def setstore(self, datastore, historystore, writedata, writehistory): self.datastore = datastore self.historystore = historystore self.writedata = writedata self.writehistory = writehistory def _connect(self): return self.repo.connectionpool.get(self.repo.fallbackpath) def request(self, fileids): """Takes a list of filename/node pairs and fetches them from the server. Files are stored in the local cache. A list of nodes that the server couldn't find is returned. If the connection fails, an exception is raised. 
""" if not self.remotecache.connected: self.connect() cache = self.remotecache writedata = self.writedata repo = self.repo total = len(fileids) request = b"get\n%d\n" % total idmap = {} reponame = repo.name for file, id in fileids: fullid = getcachekey(reponame, file, id) if self.cacheprocesspasspath: request += file + b'\0' request += fullid + b"\n" idmap[fullid] = file cache.request(request) progress = self.ui.makeprogress(_(b'downloading'), total=total) progress.update(0) missed = [] while True: missingid = cache.receiveline() if not missingid: missedset = set(missed) for missingid in idmap: if not missingid in missedset: missed.append(missingid) self.ui.warn( _( b"warning: cache connection closed early - " + b"falling back to server\n" ) ) break if missingid == b"0": break if missingid.startswith(b"_hits_"): # receive progress reports parts = missingid.split(b"_") progress.increment(int(parts[2])) continue missed.append(missingid) global fetchmisses fetchmisses += len(missed) fromcache = total - len(missed) progress.update(fromcache, total=total) self.ui.log( b"remotefilelog", b"remote cache hit rate is %r of %r\n", fromcache, total, hit=fromcache, total=total, ) oldumask = os.umask(0o002) try: # receive cache misses from master if missed: # When verbose is true, sshpeer prints 'running ssh...' # to stdout, which can interfere with some command # outputs verbose = self.ui.verbose self.ui.verbose = False try: with self._connect() as conn: remote = conn.peer if remote.capable( constants.NETWORK_CAP_LEGACY_SSH_GETFILES ): if not isinstance(remote, _sshv1peer): raise error.Abort( b'remotefilelog requires ssh servers' ) step = self.ui.configint( b'remotefilelog', b'getfilesstep' ) getfilestype = self.ui.config( b'remotefilelog', b'getfilestype' ) if getfilestype == b'threaded': _getfiles = _getfiles_threaded else: _getfiles = _getfiles_optimistic _getfiles( remote, self.receivemissing, progress.increment, missed, idmap, step, ) elif remote.capable(b"x_rfl_getfile"): if remote.capable(b'batch'): batchdefault = 100 else: batchdefault = 10 batchsize = self.ui.configint( b'remotefilelog', b'batchsize', batchdefault ) self.ui.debug( b'requesting %d files from ' b'remotefilelog server...\n' % len(missed) ) _getfilesbatch( remote, self.receivemissing, progress.increment, missed, idmap, batchsize, ) else: raise error.Abort( b"configured remotefilelog server" b" does not support remotefilelog" ) self.ui.log( b"remotefilefetchlog", b"Success\n", fetched_files=progress.pos - fromcache, total_to_fetch=total - fromcache, ) except Exception: self.ui.log( b"remotefilefetchlog", b"Fail\n", fetched_files=progress.pos - fromcache, total_to_fetch=total - fromcache, ) raise finally: self.ui.verbose = verbose # send to memcache request = b"set\n%d\n%s\n" % (len(missed), b"\n".join(missed)) cache.request(request) progress.complete() # mark ourselves as a user of this cache writedata.markrepo(self.repo.path) finally: os.umask(oldumask) def receivemissing(self, pipe, filename, node): line = pipe.readline()[:-1] if not line: raise error.ResponseError( _(b"error downloading file contents:"), _(b"connection closed early"), ) size = int(line) data = pipe.read(size) if len(data) != size: raise error.ResponseError( _(b"error downloading file contents:"), _(b"only received %s of %s bytes") % (len(data), size), ) self.writedata.addremotefilelognode( filename, bin(node), zlib.decompress(data) ) def connect(self): if self.cacheprocess: cmd = b"%s %s" % (self.cacheprocess, self.writedata._path) self.remotecache.connect(cmd) 
else: # If no cache process is specified, we fake one that always # returns cache misses. This enables tests to run easily # and may eventually allow us to be a drop in replacement # for the largefiles extension. class simplecache(object): def __init__(self): self.missingids = [] self.connected = True def close(self): pass def request(self, value, flush=True): lines = value.split(b"\n") if lines[0] != b"get": return self.missingids = lines[2:-1] self.missingids.append(b'0') def receiveline(self): if len(self.missingids) > 0: return self.missingids.pop(0) return None self.remotecache = simplecache() def close(self): if fetches: msg = ( b"%d files fetched over %d fetches - " + b"(%d misses, %0.2f%% hit ratio) over %0.2fs\n" ) % ( fetched, fetches, fetchmisses, float(fetched - fetchmisses) / float(fetched) * 100.0, fetchcost, ) if self.debugoutput: self.ui.warn(msg) self.ui.log( b"remotefilelog.prefetch", msg.replace(b"%", b"%%"), remotefilelogfetched=fetched, remotefilelogfetches=fetches, remotefilelogfetchmisses=fetchmisses, remotefilelogfetchtime=fetchcost * 1000, ) if self.remotecache.connected: self.remotecache.close() def prefetch( self, fileids, force=False, fetchdata=True, fetchhistory=False ): """downloads the given file versions to the cache""" repo = self.repo idstocheck = [] for file, id in fileids: # hack # - we don't use .hgtags # - workingctx produces ids with length 42, # which we skip since they aren't in any cache if ( file == b'.hgtags' or len(id) == 42 or not repo.shallowmatch(file) ): continue idstocheck.append((file, bin(id))) datastore = self.datastore historystore = self.historystore if force: datastore = contentstore.unioncontentstore(*repo.shareddatastores) historystore = metadatastore.unionmetadatastore( *repo.sharedhistorystores ) missingids = set() if fetchdata: missingids.update(datastore.getmissing(idstocheck)) if fetchhistory: missingids.update(historystore.getmissing(idstocheck)) # partition missing nodes into nullid and not-nullid so we can # warn about this filtering potentially shadowing bugs. nullids = len( [None for unused, id in missingids if id == self.repo.nullid] ) if nullids: missingids = [ (f, id) for f, id in missingids if id != self.repo.nullid ] repo.ui.develwarn( ( b'remotefilelog not fetching %d null revs' b' - this is likely hiding bugs' % nullids ), config=b'remotefilelog-ext', ) if missingids: global fetches, fetched, fetchcost fetches += 1 # We want to be able to detect excess individual file downloads, so # let's log that information for debugging. 
if fetches >= 15 and fetches < 18: if fetches == 15: fetchwarning = self.ui.config( b'remotefilelog', b'fetchwarning' ) if fetchwarning: self.ui.warn(fetchwarning + b'\n') self.logstacktrace() missingids = [(file, hex(id)) for file, id in sorted(missingids)] fetched += len(missingids) start = time.time() missingids = self.request(missingids) if missingids: raise error.Abort( _(b"unable to download %d files") % len(missingids) ) fetchcost += time.time() - start self._lfsprefetch(fileids) def _lfsprefetch(self, fileids): if not _lfsmod or not util.safehasattr( self.repo.svfs, b'lfslocalblobstore' ): return if not _lfsmod.wrapper.candownload(self.repo): return pointers = [] store = self.repo.svfs.lfslocalblobstore for file, id in fileids: node = bin(id) rlog = self.repo.file(file) if rlog.flags(node) & revlog.REVIDX_EXTSTORED: text = rlog.rawdata(node) p = _lfsmod.pointer.deserialize(text) oid = p.oid() if not store.has(oid): pointers.append(p) if len(pointers) > 0: self.repo.svfs.lfsremoteblobstore.readbatch(pointers, store) assert all(store.has(p.oid()) for p in pointers) def logstacktrace(self): import traceback self.ui.log( b'remotefilelog', b'excess remotefilelog fetching:\n%s\n', b''.join(pycompat.sysbytes(s) for s in traceback.format_stack()), )
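

def _demo_cache_protocol():
    """A minimal sketch, illustration only (this function is hypothetical and
    not part of the extension), of the framing spoken between
    fileserverclient.request() and the external cache process: a b"get"
    request lists one key per line, and the process answers with one line per
    *missing* key, optional b"_hits_<n>_..." progress lines, and a
    terminating b"0" line.
    """
    keys = [b'repo/ab/cdef0123/deadbeef', b'repo/12/34567890/cafebabe']
    request = b"get\n%d\n%s" % (len(keys), b"".join(k + b"\n" for k in keys))
    # A toy in-memory "cache process" that only holds the first key:
    have = {keys[0]}
    lines = request.split(b"\n")
    assert lines[0] == b"get" and int(lines[1]) == len(keys)
    reply = [k for k in lines[2:-1] if k not in have] + [b"0"]
    # The client records every line before b"0" as a miss to fetch remotely.
    assert reply == [keys[1], b"0"]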
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import errno import json import logging import os import pkgutil import threading import xml.etree.ElementTree as ET from abc import abstractmethod from collections import OrderedDict, defaultdict, namedtuple import six from twitter.common.collections import OrderedSet from pants.backend.jvm.jar_dependency_utils import M2Coordinate, ResolvedJar from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement, PinnedJarArtifactSet) from pants.backend.jvm.targets.exclude import Exclude from pants.backend.jvm.targets.jar_dependency import JarDependency from pants.backend.jvm.targets.jar_library import JarLibrary from pants.base.generator import Generator, TemplateData from pants.base.revision import Revision from pants.build_graph.target import Target from pants.ivy.bootstrapper import Bootstrapper from pants.java.util import execute_runner from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open from pants.util.fileutil import atomic_copy class IvyResolutionStep(object): """Ivy specific class for describing steps of performing resolution.""" # NB(nh): This class is the base class for the ivy resolve and fetch steps. # It also specifies the abstract methods that define the components of resolution steps. def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_cache_dir, global_ivy_workdir): """ :param confs: A tuple of string ivy confs to resolve for. :param hash_name: A unique string name for this resolve. :param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of. :param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the fact. :param ivy_cache_dir: The cache directory used by Ivy for this resolution step. :param global_ivy_workdir: The workdir that all ivy outputs live in. 
""" self.confs = confs self.hash_name = hash_name self.pinned_artifacts = pinned_artifacts self.soft_excludes = soft_excludes self.ivy_cache_dir = ivy_cache_dir self.global_ivy_workdir = global_ivy_workdir self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs} @abstractmethod def required_load_files_exist(self): """The files required to load a previous resolve exist.""" @abstractmethod def required_exec_files_exist(self): """The files to do a resolve exist.""" @abstractmethod def load(self, targets): """Loads the result of a resolve or fetch.""" @abstractmethod def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory): """Runs the resolve or fetch and loads the result, returning it.""" @property def workdir(self): return os.path.join(self.global_ivy_workdir, self.hash_name) @property def symlink_classpath_filename(self): return os.path.join(self.workdir, 'classpath') @property def ivy_cache_classpath_filename(self): return '{}.raw'.format(self.symlink_classpath_filename) @property def frozen_resolve_file(self): return os.path.join(self.workdir, 'resolution.json') @property def symlink_dir(self): return os.path.join(self.global_ivy_workdir, 'jars') @abstractmethod def ivy_xml_path(self): """Ivy xml location.""" @abstractmethod def resolve_report_path(self, conf): """Location of the resolve report in the workdir.""" def _construct_and_load_symlink_map(self): artifact_paths, symlink_map = IvyUtils.construct_and_load_symlink_map( self.symlink_dir, self.ivy_cache_dir, self.ivy_cache_classpath_filename, self.symlink_classpath_filename) return artifact_paths, symlink_map def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report, workunit_factory, workunit_name): IvyUtils.do_resolve(executor, extra_args, ivyxml, jvm_options, self.workdir_reports_by_conf, self.confs, self.ivy_cache_dir, self.ivy_cache_classpath_filename, hash_name_for_report, workunit_factory, workunit_name) class IvyFetchStep(IvyResolutionStep): """Resolves ivy artifacts using the coordinates from a previous resolve.""" def required_load_files_exist(self): return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and os.path.isfile(self.ivy_cache_classpath_filename) and os.path.isfile(self.frozen_resolve_file)) def resolve_report_path(self, conf): return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf)) @property def ivy_xml_path(self): return os.path.join(self.workdir, 'fetch-ivy.xml') def required_exec_files_exist(self): return os.path.isfile(self.frozen_resolve_file) def load(self, targets): try: frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file, targets) except Exception as e: logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e)) return NO_RESOLVE_RUN_RESULT return self._load_from_fetch(frozen_resolutions) def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory): try: frozen_resolutions = FrozenResolution.load_from_file(self.frozen_resolve_file, targets) except Exception as e: logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e)) return NO_RESOLVE_RUN_RESULT self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options, workunit_name, workunit_factory) result = self._load_from_fetch(frozen_resolutions) if not result.all_linked_artifacts_exist(): raise IvyResolveMappingError( 'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir, result)) return result 
def _load_from_fetch(self, frozen_resolutions): artifact_paths, symlink_map = self._construct_and_load_symlink_map() return IvyFetchResolveResult(artifact_paths, symlink_map, self.hash_name, self.workdir_reports_by_conf, frozen_resolutions) def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name, workunit_factory): # It's important for fetches to have a different ivy report from resolves as their # contents differ. hash_name_for_report = '{}-fetch'.format(self.hash_name) ivyxml = self.ivy_xml_path self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report) self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report, workunit_factory, workunit_name) def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report): # NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not # part of the requested confs. default_resolution = frozen_resolution.get('default') if default_resolution is None: raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.") try: jars = default_resolution.jar_dependencies IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report) except Exception as e: raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e)) class IvyResolveStep(IvyResolutionStep): """Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates.""" def required_load_files_exist(self): return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and os.path.isfile(self.ivy_cache_classpath_filename)) def resolve_report_path(self, conf): return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf)) @property def ivy_xml_path(self): return os.path.join(self.workdir, 'resolve-ivy.xml') def load(self, targets): artifact_paths, symlink_map = self._construct_and_load_symlink_map() return IvyResolveResult(artifact_paths, symlink_map, self.hash_name, self.workdir_reports_by_conf) def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory): self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory) result = self.load(targets) if not result.all_linked_artifacts_exist(): raise IvyResolveMappingError( 'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir, result)) frozen_resolutions_by_conf = result.get_frozen_resolutions_by_conf(targets) FrozenResolution.dump_to_file(self.frozen_resolve_file, frozen_resolutions_by_conf) return result def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory): safe_mkdir(self.workdir) ivyxml = self.ivy_xml_path hash_name = '{}-resolve'.format(self.hash_name) self._prepare_ivy_xml(targets, ivyxml, hash_name) self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name, workunit_factory, workunit_name) def _prepare_ivy_xml(self, targets, ivyxml, hash_name): # TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better # diagnostics can be had in `IvyUtils.generate_ivy` if this is done. # See: https://github.com/pantsbuild/pants/issues/2239 jars, global_excludes = IvyUtils.calculate_classpath(targets) # Don't pass global excludes to ivy when using soft excludes. 
if self.soft_excludes: global_excludes = [] IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs, hash_name, self.pinned_artifacts) class FrozenResolution(object): """Contains the abstracted results of a resolve. With this we can do a simple fetch. """ # TODO(nh): include full dependency graph in here. # So that we can inject it into the build graph if we want to. class MissingTarget(Exception): """Thrown when a loaded resolution has a target spec for a target that doesn't exist.""" def __init__(self): self.target_to_resolved_coordinates = defaultdict(OrderedSet) self.all_resolved_coordinates = OrderedSet() self.coordinate_to_attributes = OrderedDict() @property def jar_dependencies(self): return [ JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext, **self.coordinate_to_attributes.get(c, {})) for c in self.all_resolved_coordinates] def add_resolved_jars(self, target, resolved_jars): coords = [j.coordinate for j in resolved_jars] self.add_resolution_coords(target, coords) # Assuming target is a jar library. for j in target.jar_dependencies: if j.url: self.coordinate_to_attributes[j.coordinate] = {'url': j.url} else: self.coordinate_to_attributes[j.coordinate] = {} def add_resolution_coords(self, target, coords): for c in coords: self.target_to_resolved_coordinates[target].add(c) self.all_resolved_coordinates.add(c) def target_spec_to_coordinate_strings(self): return {t.address.spec: [str(c) for c in coordinates] for t, coordinates in self.target_to_resolved_coordinates.items()} def __repr__(self): return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format( '\n '.join(': '.join([t.address.spec, '\n '.join(str(c) for c in cs)]) for t,cs in self.target_to_resolved_coordinates.items()), '\n '.join(str(c) for c in self.coordinate_to_attributes.keys()) ) def __eq__(self, other): return (type(self) == type(other) and self.all_resolved_coordinates == other.all_resolved_coordinates and self.target_to_resolved_coordinates == other.target_to_resolved_coordinates) def __ne__(self, other): return not self == other @classmethod def load_from_file(cls, filename, targets): if not os.path.exists(filename): return None with open(filename) as f: # Using OrderedDict here to maintain insertion order of dict entries. from_file = json.load(f, object_pairs_hook=OrderedDict) result = {} target_lookup = {t.address.spec: t for t in targets} for conf, serialized_resolution in from_file.items(): resolution = FrozenResolution() def m2_for(c): return M2Coordinate.from_string(c) for coord, attr_dict in serialized_resolution['coord_to_attrs'].items(): m2 = m2_for(coord) resolution.coordinate_to_attributes[m2] = attr_dict for spec, coord_strs in serialized_resolution['target_to_coords'].items(): t = target_lookup.get(spec, None) if t is None: raise cls.MissingTarget('Cannot find target for address {} in frozen resolution' .format(spec)) resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs]) result[conf] = resolution return result @classmethod def dump_to_file(cls, filename, resolutions_by_conf): res = {} for conf, resolution in resolutions_by_conf.items(): res[conf] = OrderedDict([ ['target_to_coords',resolution.target_spec_to_coordinate_strings()], ['coord_to_attrs', OrderedDict([str(c), attrs] for c, attrs in resolution.coordinate_to_attributes.items())] ]) with safe_concurrent_creation(filename) as tmp_filename: with open(tmp_filename, 'wb') as f: json.dump(res, f) class IvyResolveResult(object): """The result of an Ivy resolution. 
The result data includes the list of resolved artifacts, the relationships between those artifacts and the targets that requested them and the hash name of the resolve. """ def __init__(self, resolved_artifact_paths, symlink_map, resolve_hash_name, reports_by_conf): self._reports_by_conf = reports_by_conf self.resolved_artifact_paths = resolved_artifact_paths self.resolve_hash_name = resolve_hash_name self._symlink_map = symlink_map @property def has_resolved_artifacts(self): """The requested targets have a resolution associated with them.""" return self.resolve_hash_name is not None def all_linked_artifacts_exist(self): """All of the artifact paths for this resolve point to existing files.""" if not self.has_resolved_artifacts: return False for path in self.resolved_artifact_paths: if not os.path.isfile(path): return False else: return True def report_for_conf(self, conf): """Returns the path to the ivy report for the provided conf. Returns None if there is no path. """ return self._reports_by_conf.get(conf) def get_frozen_resolutions_by_conf(self, targets): frozen_resolutions_by_conf = OrderedDict() for conf in self._reports_by_conf: frozen_resolution = FrozenResolution() for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets): frozen_resolution.add_resolved_jars(target, resolved_jars) frozen_resolutions_by_conf[conf] = frozen_resolution return frozen_resolutions_by_conf def resolved_jars_for_each_target(self, conf, targets): """Yields the resolved jars for each passed JarLibrary. If there is no report for the requested conf, yields nothing. :param conf: The ivy conf to load jars for. :param targets: The collection of JarLibrary targets to find resolved jars for. :yield: target, resolved_jars :raises IvyTaskMixin.UnresolvedJarError """ ivy_info = self._ivy_info_for(conf) if not ivy_info: return jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)] ivy_jar_memo = {} for target in jar_library_targets: # Add the artifacts from each dependency module. resolved_jars = self._resolved_jars_with_symlinks(conf, ivy_info, ivy_jar_memo, self._jar_dependencies_for_target(conf, target), target) yield target, resolved_jars def _jar_dependencies_for_target(self, conf, target): return target.jar_dependencies def _ivy_info_for(self, conf): report_path = self._reports_by_conf.get(conf) return IvyUtils.parse_xml_report(conf, report_path) def _new_resolved_jar_with_symlink_path(self, conf, target, resolved_jar_without_symlink): def candidate_cache_paths(): # There is a focus on being lazy here to avoid `os.path.realpath` when we can. yield resolved_jar_without_symlink.cache_path yield os.path.realpath(resolved_jar_without_symlink.cache_path) for cache_path in candidate_cache_paths(): pants_path = self._symlink_map.get(cache_path) if pants_path: break else: raise IvyResolveMappingError( 'Jar {resolved_jar} in {spec} not resolved to the ivy ' 'symlink map in conf {conf}.' 
                               .format(spec=target.address.spec,
                                       resolved_jar=resolved_jar_without_symlink.cache_path,
                                       conf=conf))
    return ResolvedJar(coordinate=resolved_jar_without_symlink.coordinate,
                       pants_path=pants_path,
                       cache_path=resolved_jar_without_symlink.cache_path)

  def _resolved_jars_with_symlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
    raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates, memo=ivy_jar_memo)
    resolved_jars = [self._new_resolved_jar_with_symlink_path(conf, target, raw_resolved_jar)
                     for raw_resolved_jar in raw_resolved_jars]
    return resolved_jars


class IvyFetchResolveResult(IvyResolveResult):
  """A resolve result that uses the frozen resolution to look up dependencies."""

  def __init__(self, resolved_artifact_paths, symlink_map, resolve_hash_name, reports_by_conf,
               frozen_resolutions):
    super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, symlink_map,
                                                resolve_hash_name, reports_by_conf)
    self._frozen_resolutions = frozen_resolutions

  def _jar_dependencies_for_target(self, conf, target):
    return self._frozen_resolutions[conf].target_to_resolved_coordinates.get(target, ())


NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})


IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])


Dependency = namedtuple('DependencyAttributes',
                        ['org', 'name', 'rev', 'mutable', 'force', 'transitive'])


Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])


logger = logging.getLogger(__name__)


class IvyResolveMappingError(Exception):
  """Raised when there is a failure mapping the ivy resolve results to pants objects."""


class IvyModuleRef(object):
  """
  :API: public
  """

  # latest.integration is ivy magic meaning "just get the latest version"
  _ANY_REV = 'latest.integration'

  def __init__(self, org, name, rev, classifier=None, ext=None):
    self.org = org
    self.name = name
    self.rev = rev
    self.classifier = classifier
    self.ext = ext or 'jar'
    self._id = (self.org, self.name, self.rev, self.classifier, self.ext)

  def __eq__(self, other):
    return isinstance(other, IvyModuleRef) and self._id == other._id

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(self._id)

  def __str__(self):
    return 'IvyModuleRef({})'.format(':'.join((x or '') for x in self._id))

  def __repr__(self):
    return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
            .format(*self._id))

  def __cmp__(self, other):
    # We can't just re-use __repr__ or __str__ because we want to order rev last
    return cmp((self.org, self.name, self.classifier, self.ext, self.rev),
               (other.org, other.name, other.classifier, other.ext, other.rev))

  @property
  def caller_key(self):
    """This returns an identifier for an IvyModuleRef that only retains the caller org and name.

    Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
    This method returns a `<caller/>` representation of the current ref.
    """
    return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)

  @property
  def unversioned(self):
    """This returns an identifier for an IvyModuleRef without version information.

    It's useful because ivy might return information about a different version of a dependency
    than the one we request, and we want to ensure that all requesters of any version of that
    dependency are able to learn about it.
""" return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier, ext=self.ext) class IvyInfo(object): """ :API: public """ def __init__(self, conf): self._conf = conf self.modules_by_ref = {} # Map from ref to referenced module. self.refs_by_unversioned_refs = {} # Map from unversioned ref to the resolved versioned ref # Map from ref of caller to refs of modules required by that caller. self._deps_by_caller = defaultdict(OrderedSet) # Map from _unversioned_ ref to OrderedSet of IvyArtifact instances. self._artifacts_by_ref = defaultdict(OrderedSet) def add_module(self, module): if not module.artifact: # Module was evicted, so do not record information about it return ref_unversioned = module.ref.unversioned if ref_unversioned in self.refs_by_unversioned_refs: raise IvyResolveMappingError('Already defined module {}, as rev {}!' .format(ref_unversioned, module.ref.rev)) if module.ref in self.modules_by_ref: raise IvyResolveMappingError('Already defined module {}, would be overwritten!' .format(module.ref)) self.refs_by_unversioned_refs[ref_unversioned] = module.ref self.modules_by_ref[module.ref] = module for caller in module.callers: self._deps_by_caller[caller.caller_key].add(module.ref) self._artifacts_by_ref[ref_unversioned].add(module.artifact) def _do_traverse_dependency_graph(self, ref, collector, memo, visited): memoized_value = memo.get(ref) if memoized_value: return memoized_value if ref in visited: # Ivy allows for circular dependencies # If we're here, that means we're resolving something that # transitively depends on itself return set() visited.add(ref) acc = collector(ref) # NB(zundel): ivy does not return deps in a consistent order for the same module for # different resolves. Sort them to get consistency and prevent cache invalidation. # See https://github.com/pantsbuild/pants/issues/2607 deps = sorted(self._deps_by_caller.get(ref.caller_key, ())) for dep in deps: acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited)) memo[ref] = acc return acc def traverse_dependency_graph(self, ref, collector, memo=None): """Traverses module graph, starting with ref, collecting values for each ref into the sets created by the collector function. :param ref an IvyModuleRef to start traversing the ivy dependency graph :param collector a function that takes a ref and returns a new set of values to collect for that ref, which will also be updated with all the dependencies accumulated values :param memo is a dict of ref -> set that memoizes the results of each node in the graph. If provided, allows for retaining cache across calls. :returns the accumulated set for ref """ resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned) if resolved_ref: ref = resolved_ref if memo is None: memo = dict() visited = set() return self._do_traverse_dependency_graph(ref, collector, memo, visited) def get_resolved_jars_for_coordinates(self, coordinates, memo=None): """Collects jars for the passed coordinates. Because artifacts are only fetched for the "winning" version of a module, the artifacts will not always represent the version originally declared by the library. This method is transitive within the passed coordinates dependencies. :param coordinates collections.Iterable: Collection of coordinates to collect transitive resolved jars for. :param memo: See `traverse_dependency_graph`. :returns: All the artifacts for all of the jars for the provided coordinates, including transitive dependencies. 
:rtype: list of :class:`pants.backend.jvm.jar_dependency_utils.ResolvedJar` """ def to_resolved_jar(jar_ref, jar_path): return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org, name=jar_ref.name, rev=jar_ref.rev, classifier=jar_ref.classifier, ext=jar_ref.ext), cache_path=jar_path) resolved_jars = OrderedSet() def create_collection(dep): return OrderedSet([dep]) for jar in coordinates: classifier = jar.classifier if self._conf == 'default' else self._conf jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier) for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo): for artifact_path in self._artifacts_by_ref[module_ref.unversioned]: resolved_jars.add(to_resolved_jar(module_ref, artifact_path)) return resolved_jars def __repr__(self): return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys()) class IvyUtils(object): """Useful methods related to interaction with ivy. :API: public """ # Protects ivy executions. _ivy_lock = threading.RLock() # Protect writes to the global map of jar path -> symlinks to that jar. _symlink_map_lock = threading.Lock() INTERNAL_ORG_NAME = 'internal' class IvyError(Exception): """Indicates an error preparing an ivy operation.""" class IvyResolveReportError(IvyError): """Indicates that an ivy report cannot be found.""" class IvyResolveConflictingDepsError(IvyError): """Indicates two or more locally declared dependencies conflict.""" class BadRevisionError(IvyError): """Indicates an unparseable version number.""" @staticmethod def _generate_exclude_template(exclude): return TemplateData(org=exclude.org, name=exclude.name) @staticmethod def _generate_override_template(jar): return TemplateData(org=jar.org, module=jar.name, version=jar.rev) @staticmethod def _load_classpath_from_cachepath(path): if not os.path.exists(path): return [] else: with safe_open(path, 'r') as cp: return filter(None, (path.strip() for path in cp.read().split(os.pathsep))) @classmethod def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf, confs, ivy_cache_dir, ivy_cache_classpath_filename, resolve_hash_name, workunit_factory, workunit_name): """Execute Ivy with the given ivy.xml and copies all relevant files into the workdir. This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending on whether there is an existing frozen resolution. After it is run, the Ivy reports are copied into the workdir at the paths specified by workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts and their transitive dependencies. :param executor: A JVM executor to use to invoke ivy. :param extra_args: Extra arguments to pass to ivy. :param ivyxml: The input ivy.xml containing the dependencies to resolve. :param jvm_options: A list of jvm option strings to use for the ivy invoke, or None. :param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir. :param confs: The confs used in the resolve. :param resolve_hash_name: The hash to use as the module name for finding the ivy report file. :param workunit_factory: A workunit factory for the ivy invoke, or None. :param workunit_name: A workunit name for the ivy invoke, or None. 
""" ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory) with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp: extra_args = extra_args or [] args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args with cls._ivy_lock: cls._exec_ivy(ivy, confs, ivyxml, args, jvm_options=jvm_options, executor=executor, workunit_name=workunit_name, workunit_factory=workunit_factory) if not os.path.exists(raw_target_classpath_file_tmp): raise cls.IvyError('Ivy failed to create classpath file at {}' .format(raw_target_classpath_file_tmp)) cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name) logger.debug('Moved ivy classfile file to {dest}' .format(dest=ivy_cache_classpath_filename)) @classmethod def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name): for conf in confs: ivy_cache_report_path = IvyUtils.xml_report_path(ivy_cache_dir, resolve_hash_name, conf) workdir_report_path = workdir_report_paths_by_conf[conf] try: atomic_copy(ivy_cache_report_path, workdir_report_path) except IOError as e: raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}' .format(ivy_cache_report_path, workdir_report_path, e)) @classmethod def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor, workunit_name, workunit_factory): ivy = ivy or Bootstrapper.default_ivy() ivy_args = ['-ivy', ivyxml] ivy_args.append('-confs') ivy_args.extend(confs) ivy_args.extend(args) ivy_jvm_options = list(jvm_options) # Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng. ivy_jvm_options.append('-Dsun.io.useCanonCaches=false') runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor) try: with ivy.resolution_lock: result = execute_runner(runner, workunit_factory=workunit_factory, workunit_name=workunit_name) if result != 0: raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result, cmd=runner.cmd)) except runner.executor.Error as e: raise IvyUtils.IvyError(e) @classmethod def construct_and_load_symlink_map(cls, symlink_dir, ivy_cache_dir, ivy_cache_classpath_filename, symlink_classpath_filename): # Make our actual classpath be symlinks, so that the paths are uniform across systems. # Note that we must do this even if we read the raw_target_classpath_file from the artifact # cache. If we cache the target_classpath_file we won't know how to create the symlinks. with IvyUtils._symlink_map_lock: # A common dir for symlinks into the ivy2 cache. This ensures that paths to jars # in artifact-cached analysis files are consistent across systems. # Note that we have one global, well-known symlink dir, again so that paths are # consistent across builds. symlink_map = cls._symlink_cachepath(ivy_cache_dir, ivy_cache_classpath_filename, symlink_dir, symlink_classpath_filename) classpath = cls._load_classpath_from_cachepath(symlink_classpath_filename) return classpath, symlink_map @classmethod def _symlink_cachepath(cls, ivy_cache_dir, inpath, symlink_dir, outpath): """Symlinks all paths listed in inpath that are under ivy_cache_dir into symlink_dir. If there is an existing symlink for a file under inpath, it is used rather than creating a new symlink. Preserves all other paths. Writes the resulting paths to outpath. Returns a map of path -> symlink to that path. """ safe_mkdir(symlink_dir) # The ivy_cache_dir might itself be a symlink. 
    # In this case, ivy may return paths that
    # reference the realpath of the .jar file after it is resolved in the cache dir. To handle
    # this case, add both the symlink'ed path and the realpath to the jar to the symlink map.
    real_ivy_cache_dir = os.path.realpath(ivy_cache_dir)
    symlink_map = OrderedDict()

    inpaths = cls._load_classpath_from_cachepath(inpath)
    paths = OrderedSet([os.path.realpath(path) for path in inpaths])

    for path in paths:
      if path.startswith(real_ivy_cache_dir):
        symlink_map[path] = os.path.join(symlink_dir, os.path.relpath(path, real_ivy_cache_dir))
      else:
        # This path is outside the cache. We won't symlink it.
        symlink_map[path] = path

    # Create symlinks for paths in the ivy cache dir.
    for path, symlink in six.iteritems(symlink_map):
      if path == symlink:
        # Skip paths that aren't going to be symlinked.
        continue
      safe_mkdir(os.path.dirname(symlink))
      try:
        os.symlink(path, symlink)
      except OSError as e:
        # We don't delete and recreate the symlink, as this may break concurrently executing code.
        if e.errno != errno.EEXIST:
          raise

    # (re)create the classpath with all of the paths
    with safe_open(outpath, 'w') as outfile:
      outfile.write(':'.join(OrderedSet(symlink_map.values())))

    return dict(symlink_map)

  @classmethod
  def xml_report_path(cls, cache_dir, resolve_hash_name, conf):
    """The path to the xml report ivy creates after a retrieve.

    :API: public

    :param string cache_dir: The path of the ivy cache dir used for resolves.
    :param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
                                     resolution.
    :param string conf: The ivy conf name (e.g. "default").
    :returns: The report path.
    :rtype: string
    """
    return os.path.join(cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
                                                         resolve_hash_name, conf))

  @classmethod
  def parse_xml_report(cls, conf, path):
    """Parse the ivy xml report corresponding to the name passed to ivy.

    :API: public

    :param string conf: the ivy conf name (e.g. "default")
    :param string path: The path to the ivy report file.
    :returns: The info in the xml report.
    :rtype: :class:`IvyInfo`
    :raises: :class:`IvyResolveMappingError` if no report exists.
""" if not os.path.exists(path): raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path)) logger.debug("Parsing ivy report {}".format(path)) ret = IvyInfo(conf) etree = ET.parse(path) doc = etree.getroot() for module in doc.findall('dependencies/module'): org = module.get('organisation') name = module.get('name') for revision in module.findall('revision'): rev = revision.get('name') callers = [] for caller in revision.findall('caller'): callers.append(IvyModuleRef(caller.get('organisation'), caller.get('name'), caller.get('callerrev'))) for artifact in revision.findall('artifacts/artifact'): classifier = artifact.get('extra-classifier') ext = artifact.get('ext') ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev, classifier=classifier, ext=ext) artifact_cache_path = artifact.get('location') ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers)) ret.add_module(ivy_module) return ret @classmethod def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None, pinned_artifacts=None, jar_dep_manager=None): if not resolve_hash_name: resolve_hash_name = Target.maybe_readable_identify(targets) return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts, jar_dep_manager) @classmethod def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None, jar_dep_manager=None): org = IvyUtils.INTERNAL_ORG_NAME name = resolve_hash_name extra_configurations = [conf for conf in confs if conf and conf != 'default'] jars_by_key = OrderedDict() for jar in jars: jars = jars_by_key.setdefault((jar.org, jar.name), []) jars.append(jar) manager = jar_dep_manager or JarDependencyManagement.global_instance() artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it. for jars in jars_by_key.values(): for i, dep in enumerate(jars): direct_coord = M2Coordinate.create(dep) managed_coord = artifact_set[direct_coord] if direct_coord.rev != managed_coord.rev: # It may be necessary to actually change the version number of the jar we want to resolve # here, because overrides do not apply directly (they are exclusively transitive). This is # actually a good thing, because it gives us more control over what happens. coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force) jars[i] = dep.copy(rev=coord.rev) elif dep.force: # If this dependency is marked as 'force' and there is no version conflict, use the normal # pants behavior for 'force'. artifact_set.put(direct_coord) dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()] # As it turns out force is not transitive - it only works for dependencies pants knows about # directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs # don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to # edit the generated ivy.xml and use the override feature [3] though and that does work # transitively as you'd hope. 
# # [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html # [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/ # src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java # [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html overrides = [cls._generate_override_template(_coord) for _coord in artifact_set] excludes = [cls._generate_exclude_template(exclude) for exclude in excludes] template_data = TemplateData( org=org, module=name, extra_configurations=extra_configurations, dependencies=dependencies, excludes=excludes, overrides=overrides) template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.mustache') cls._write_ivy_xml_file(ivyxml, template_data, template_relpath) @classmethod def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name): """Generates an ivy xml with all jars marked as intransitive using the all conflict manager.""" org = IvyUtils.INTERNAL_ORG_NAME name = resolve_hash_name extra_configurations = [conf for conf in confs if conf and conf != 'default'] # Use org name _and_ rev so that we can have dependencies with different versions. This will # allow for batching fetching if we want to do that. jars_by_key = OrderedDict() for jar in jars: jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar) dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()] template_data = TemplateData(org=org, module=name, extra_configurations=extra_configurations, dependencies=dependencies) template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.mustache') cls._write_ivy_xml_file(ivyxml, template_data, template_relpath) @classmethod def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath): template_text = pkgutil.get_data(__name__, template_relpath) generator = Generator(template_text, lib=template_data) with safe_open(ivyxml, 'w') as output: generator.write(output) @classmethod def calculate_classpath(cls, targets): """Creates a consistent classpath and list of excludes for the passed targets. It also modifies the JarDependency objects' excludes to contain all the jars excluded by provides. :param iterable targets: List of targets to collect JarDependencies and excludes from. :returns: A pair of a list of JarDependencies, and a set of excludes to apply globally. """ jars = OrderedDict() global_excludes = set() provide_excludes = set() targets_processed = set() # Support the ivy force concept when we sanely can for internal dep conflicts. # TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking # strategy generally. def add_jar(jar): # TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its # attachments (classified artifacts) at another. Ivy does not, allow this, the dependency # can carry only 1 rev and that hosts multiple artifacts for that rev. This conflict # resolution happens at the classifier level, allowing skew in a # multi-artifact/multi-classifier dependency. We only find out about the skew later in # `_generate_jar_template` below which will blow up with a conflict. Move this logic closer # together to get a more clear validate, then emit ivy.xml then resolve flow instead of the # spread-out validations happening here. 
# See: https://github.com/pantsbuild/pants/issues/2239 coordinate = (jar.org, jar.name, jar.classifier) existing = jars.get(coordinate) jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing, proposed=jar) def collect_jars(target): if isinstance(target, JarLibrary): for jar in target.jar_dependencies: add_jar(jar) def collect_excludes(target): target_excludes = target.payload.get_field_value('excludes') if target_excludes: global_excludes.update(target_excludes) def collect_provide_excludes(target): if not target.is_exported: return logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format( target.provides.org, target.provides.name, target)) provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name)) def collect_elements(target): targets_processed.add(target) collect_jars(target) collect_excludes(target) collect_provide_excludes(target) for target in targets: target.walk(collect_elements, predicate=lambda target: target not in targets_processed) # If a source dep is exported (ie, has a provides clause), it should always override # remote/binary versions of itself, ie "round trip" dependencies. # TODO: Move back to applying provides excludes as target-level excludes when they are no # longer global. if provide_excludes: additional_excludes = tuple(provide_excludes) jars = {coordinate: jar.copy(excludes=jar.excludes + additional_excludes) for coordinate, jar in jars.items()} return jars.values(), global_excludes @classmethod def _resolve_conflict(cls, existing, proposed): if existing.rev is None: return proposed if proposed.rev is None: return existing if proposed == existing: if proposed.force: return proposed return existing elif existing.force and proposed.force: raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format( proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev )) elif existing.force: logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format( proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev )) return existing elif proposed.force: logger.debug('Forcing {}#{};{} from {} to {}'.format( proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev )) return proposed else: if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev): logger.debug('Upgrading {}#{};{} from rev {} to {}'.format( proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev, )) return proposed else: return existing @classmethod def _generate_jar_template(cls, jars): global_dep_attributes = set(Dependency(org=jar.org, name=jar.name, rev=jar.rev, mutable=jar.mutable, force=jar.force, transitive=jar.transitive) for jar in jars) if len(global_dep_attributes) != 1: # TODO: Need to provide information about where these came from - could be # far-flung JarLibrary targets. The jars here were collected from targets via # `calculate_classpath` above so executing this step there instead may make more # sense. 
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes) raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}' .format('\n\t'.join(conflicting_dependencies))) jar_attributes = global_dep_attributes.pop() excludes = set() for jar in jars: excludes.update(jar.excludes) any_have_url = False artifacts = OrderedDict() for jar in jars: ext = jar.ext url = jar.url if url: any_have_url = True classifier = jar.classifier artifact = Artifact(name=jar.name, type_=ext or 'jar', ext=ext, url=url, classifier=classifier) artifacts[(ext, url, classifier)] = artifact template = TemplateData( org=jar_attributes.org, module=jar_attributes.name, version=jar_attributes.rev, mutable=jar_attributes.mutable, force=jar_attributes.force, transitive=jar_attributes.transitive, artifacts=artifacts.values(), any_have_url=any_have_url, excludes=[cls._generate_exclude_template(exclude) for exclude in excludes]) return template @classmethod def _generate_fetch_jar_template(cls, jars): global_dep_attributes = set(Dependency(org=jar.org, name=jar.name, rev=jar.rev, transitive=False, mutable=jar.mutable, force=True) for jar in jars) if len(global_dep_attributes) != 1: # If we batch fetches and assume conflict manager all, we could ignore these. # Leaving this here for now. conflicting_dependencies = sorted(str(g) for g in global_dep_attributes) raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}' .format('\n\t'.join(conflicting_dependencies))) jar_attributes = global_dep_attributes.pop() any_have_url = False artifacts = OrderedDict() for jar in jars: ext = jar.ext url = jar.url if url: any_have_url = True classifier = jar.classifier artifact = Artifact(name=jar.name, type_=ext or 'jar', ext=ext, url=url, classifier=classifier) artifacts[(ext, url, classifier)] = artifact template = TemplateData( org=jar_attributes.org, module=jar_attributes.name, version=jar_attributes.rev, mutable=jar_attributes.mutable, artifacts=artifacts.values(), any_have_url=any_have_url, excludes=[]) return template
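# ---------------------------------------------------------------------------
# Illustrative sketch (not part of ivy_utils): the symlink-map idea from
# IvyUtils._symlink_cachepath above, in isolation. Classpath entries under
# the ivy cache are remapped to stable locations under one well-known symlink
# dir (so cached analysis stays machine-independent); paths outside the cache
# pass through untouched. The helper name and the example paths in the
# trailing comment are made up; `os` is the module import at the top of this
# file. Unlike the real method, this sketch only computes the mapping and
# creates no symlinks on disk.

def _sketch_symlink_map(ivy_cache_dir, symlink_dir, classpath):
  real_cache = os.path.realpath(ivy_cache_dir)
  mapping = {}
  for path in classpath:
    real = os.path.realpath(path)
    if real.startswith(real_cache):
      # Stable location: <symlink_dir>/<path relative to the cache>.
      mapping[real] = os.path.join(symlink_dir, os.path.relpath(real, real_cache))
    else:
      # Outside the cache: leave the path alone.
      mapping[real] = real
  return mapping

# e.g. _sketch_symlink_map('/home/u/.ivy2/pants', '/workdir/jars',
#                          ['/home/u/.ivy2/pants/org.foo/jars/foo-1.0.jar'])
# => {'/home/u/.ivy2/pants/org.foo/jars/foo-1.0.jar':
#     '/workdir/jars/org.foo/jars/foo-1.0.jar'}
# ---------------------------------------------------------------------------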
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Node base class for all workers that run on hosts.""" import errno import logging as std_logging import os import random import signal import sys import time try: # Importing just the symbol here because the io module does not # exist in Python 2.6. from io import UnsupportedOperation # noqa except ImportError: # Python 2.6 UnsupportedOperation = None import eventlet from eventlet import event from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from conveyor.common import eventlet_backdoor from conveyor.common.gettextutils import _LE from conveyor.common.gettextutils import _LI from conveyor.common.gettextutils import _LW from conveyor.common import systemd from conveyor.common import threadgroup rpc = importutils.try_import('conveyor.rpc') CONF = cfg.CONF LOG = logging.getLogger(__name__) def _sighup_supported(): return hasattr(signal, 'SIGHUP') def _is_daemon(): # The process group for a foreground process will match the # process group of the controlling terminal. If those values do # not match, or ioctl() fails on the stdout file handle, we assume # the process is running in the background as a daemon. # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics try: is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) except OSError as err: if err.errno == errno.ENOTTY: # Assume we are a daemon because there is no terminal. is_daemon = True else: raise except UnsupportedOperation: # Could not get the fileno for stdout, so we must be a daemon. is_daemon = True return is_daemon def _is_sighup_and_daemon(signo): if not (_sighup_supported() and signo == signal.SIGHUP): # Avoid checking if we are a daemon, because the signal isn't # SIGHUP. return False return _is_daemon() def _signo_to_signame(signo): signals = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'} if _sighup_supported(): signals[signal.SIGHUP] = 'SIGHUP' return signals[signo] def _set_signals_handler(handler): signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) if _sighup_supported(): signal.signal(signal.SIGHUP, handler) class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher. :returns: None """ self.services = Services() self.backdoor_port = eventlet_backdoor.initialize_if_enabled() def launch_service(self, service): """Load and start the given service. :param service: The service you would like to start. :returns: None """ service.backdoor_port = self.backdoor_port self.services.add(service) def stop(self): """Stop all services which are currently running. :returns: None """ self.services.stop() def wait(self): """Waits until all services have been stopped, and then returns. 
:returns: None """ self.services.wait() def restart(self): """Reload config files and restart service. :returns: None """ cfg.CONF.reload_config_files() self.services.restart() class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo class ServiceLauncher(Launcher): def _handle_signal(self, signo, frame): # Allow the process to be killed again and die from natural causes _set_signals_handler(signal.SIG_DFL) raise SignalExit(signo) def handle_signal(self): _set_signals_handler(self._handle_signal) def _wait_for_exit_or_signal(self, ready_callback=None): status = None signo = 0 LOG.debug('Full set of CONF:') CONF.log_opt_values(LOG, std_logging.DEBUG) try: if ready_callback: ready_callback() super(ServiceLauncher, self).wait() except SignalExit as exc: signame = _signo_to_signame(exc.signo) LOG.info(_LI('Caught %s, exiting'), signame) status = exc.code signo = exc.signo except SystemExit as exc: status = exc.code finally: self.stop() if rpc: try: rpc.cleanup() except Exception: # We're shutting down, so it doesn't matter at this point. LOG.exception(_LE('Exception during rpc cleanup.')) return status, signo def wait(self, ready_callback=None): systemd.notify_once() while True: self.handle_signal() status, signo = self._wait_for_exit_or_signal(ready_callback) if not _is_sighup_and_daemon(signo): return status self.restart() class ServiceWrapper(object): def __init__(self, service, workers): self.service = service self.workers = workers self.children = set() self.forktimes = [] class ProcessLauncher(object): def __init__(self, wait_interval=0.01): """Constructor. :param wait_interval: The interval to sleep for between checks of child process exit. """ self.children = {} self.sigcaught = None self.running = True self.wait_interval = wait_interval rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') self.handle_signal() def handle_signal(self): _set_signals_handler(self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes _set_signals_handler(signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read() LOG.info(_LI('Parent process has died unexpectedly, exiting')) sys.exit(1) def _child_process_handle_signal(self): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise SignalExit(signal.SIGTERM) def _sighup(*args): signal.signal(signal.SIGHUP, signal.SIG_DFL) raise SignalExit(signal.SIGHUP) signal.signal(signal.SIGTERM, _sigterm) if _sighup_supported(): signal.signal(signal.SIGHUP, _sighup) # Block SIGINT and let the parent send us a SIGTERM signal.signal(signal.SIGINT, signal.SIG_IGN) def _child_wait_for_exit_or_signal(self, launcher): status = 0 signo = 0 # NOTE(johannes): All exceptions are caught to ensure this # doesn't fallback into the loop spawning children. It would # be bad for a child to spawn more children. 
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()


class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        # NOTE(Fengqian): docs for Event.reset() recommend against using it
        self._done = event.Event()

    def start(self):
        pass

    def stop(self):
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done:
        if not self._done.ready():
            self._done.send()

    def wait(self):
        self._done.wait()


class Services(object):

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        self.done = event.Event()

    def add(self, service):
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        # wait for graceful shutdown of services:
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        self.tg.wait()

    def restart(self):
        self.stop()
        self.done = event.Event()
        for restart_service in self.services:
            restart_service.reset()
            self.tg.add_thread(self.run_service, restart_service, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        service.start()
        done.wait()


def launch(service, workers=1):
    if workers is None or workers == 1:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)

    return launcher
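# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the fork throttle used by
# ProcessLauncher._start_child above, in isolation. The last `workers` fork
# timestamps are kept; once more than `workers` forks land inside a
# `workers`-second window, sleep a second before forking again. This caps
# respawns of instantly-dying children at roughly one per second without
# slowing normal startup. The helper name and keyword arguments are made up
# for illustration; `time` is the module import at the top of this file.

def _sketch_fork_throttle(forktimes, workers, now=None, sleep=time.sleep):
    now = time.time() if now is None else now
    if len(forktimes) > workers:
        if now - forktimes[0] < workers:
            # workers+1 forks within `workers` seconds: back off briefly.
            sleep(1)
        forktimes.pop(0)  # keep the timestamp window bounded
    forktimes.append(now)
    return forktimes
# ---------------------------------------------------------------------------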
# -*- coding: utf-8 -*- from django.conf import settings from django.test import TestCase, override_settings from django.db.migrations.autodetector import MigrationAutodetector from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.state import ProjectState, ModelState from django.db.migrations.graph import MigrationGraph from django.db.migrations.loader import MigrationLoader from django.db import models, connection from django.contrib.auth.models import AbstractBaseUser class DeconstructableObject(object): """ A custom deconstructable object. """ def deconstruct(self): return self.__module__ + '.' + self.__class__.__name__, [], {} class AutodetectorTests(TestCase): """ Tests the migration autodetector. """ author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))]) author_name = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))]) author_name_longer = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400))]) author_name_renamed = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200))]) author_name_default = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default='Ada Lovelace'))]) author_name_deconstructable_1 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructableObject()))]) author_name_deconstructable_2 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructableObject()))]) author_name_deconstructable_3 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField()))]) author_name_deconstructable_4 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField()))]) author_with_book = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))]) author_with_book_order_wrt = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))], options={"order_with_respect_to": "book"}) author_renamed_with_book = ModelState("testapp", "Writer", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))]) author_with_publisher_string = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher_name", models.CharField(max_length=200))]) author_with_publisher = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher", models.ForeignKey("testapp.Publisher"))]) author_with_custom_user = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("user", models.ForeignKey("thirdapp.CustomUser"))]) author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", )) author_proxy_options = ModelState("testapp", 
"AuthorProxy", [], {"proxy": True, "verbose_name": "Super Author"}, ("testapp.author", )) author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", )) author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", )) author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", )) author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", )) author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", )) author_with_m2m = ModelState("testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("publishers", models.ManyToManyField("testapp.Publisher")), ]) author_with_m2m_through = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract"))]) author_with_options = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))], {"verbose_name": "Authi", "permissions": [('can_hire', 'Can hire')]}) contract = ModelState("testapp", "Contract", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("publisher", models.ForeignKey("testapp.Publisher"))]) publisher = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100))]) publisher_with_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("name", models.CharField(max_length=100))]) publisher_with_aardvark_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Aardvark")), ("name", models.CharField(max_length=100))]) publisher_with_book = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("otherapp.Book")), ("name", models.CharField(max_length=100))]) other_pony = ModelState("otherapp", "Pony", [("id", models.AutoField(primary_key=True))]) other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))]) third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))]) book = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))]) book_proxy_fk = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("thirdapp.AuthorProxy")), ("title", models.CharField(max_length=200))]) book_migrations_fk = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("migrations.UnmigratedModel")), ("title", models.CharField(max_length=200))]) book_with_no_author = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("title", models.CharField(max_length=200))]) book_with_author_renamed = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Writer")), ("title", models.CharField(max_length=200))]) book_with_field_and_author_renamed = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("writer", models.ForeignKey("testapp.Writer")), ("title", models.CharField(max_length=200))]) book_with_multiple_authors = ModelState("otherapp", "Book", [("id", 
models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author")), ("title", models.CharField(max_length=200))]) book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")), ("title", models.CharField(max_length=200))]) book_unique = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": set([("author", "title")])}) book_unique_2 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": set([("title", "author")])}) book_unique_3 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("newfield", models.IntegerField()), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": set([("title", "newfield")])}) attribution = ModelState("otherapp", "Attribution", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("book", models.ForeignKey("otherapp.Book"))]) edition = ModelState("thirdapp", "Edition", [("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book"))]) custom_user = ModelState("thirdapp", "CustomUser", [("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255))], bases=(AbstractBaseUser, )) custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255))]) aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))]) aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))]) aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", )) aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [("id", models.OneToOneField("testapp.Author", primary_key=True))]) knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))]) rabbit = ModelState("eggs", "Rabbit", [("id", models.AutoField(primary_key=True)), ("knight", models.ForeignKey("eggs.Knight")), ("parent", models.ForeignKey("eggs.Rabbit"))], {"unique_together": set([("parent", "knight")])}) def repr_changes(self, changes): output = "" for app_label, migrations in sorted(changes.items()): output += " %s:\n" % app_label for migration in migrations: output += " %s\n" % migration.name for operation in migration.operations: output += " %s\n" % operation return output def assertNumberMigrations(self, changes, app_label, number): if len(changes.get(app_label, [])) != number: self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % ( len(changes.get(app_label, [])), app_label, number, self.repr_changes(changes), )) def assertOperationTypes(self, changes, app_label, index, types): if not changes.get(app_label, None): self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes))) if len(changes[app_label]) < index + 1: self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes))) migration = changes[app_label][index] real_types = [operation.__class__.__name__ for operation in migration.operations] if types != real_types: 
self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % ( app_label, migration.name, types, self.repr_changes(changes), )) def assertOperationAttributes(self, changes, app_label, index, operation_index, **attrs): if not changes.get(app_label, None): self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes))) if len(changes[app_label]) < index + 1: self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes))) migration = changes[app_label][index] if len(changes[app_label]) < index + 1: self.fail("No operation at index %s for %s.%s\n%s" % ( operation_index, app_label, migration.name, self.repr_changes(changes), )) operation = migration.operations[operation_index] for attr, value in attrs.items(): if getattr(operation, attr, None) != value: self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % ( app_label, migration.name, operation_index, attr, value, getattr(operation, attr, None), self.repr_changes(changes), )) def make_project_state(self, model_states): "Shortcut to make ProjectStates from lists of predefined models" project_state = ProjectState() for model_state in model_states: project_state.add_model_state(model_state.clone()) return project_state def test_arrange_for_graph(self): "Tests auto-naming of migrations for graph matching." # Make a fake graph graph = MigrationGraph() graph.add_node(("testapp", "0001_initial"), None) graph.add_node(("testapp", "0002_foobar"), None) graph.add_node(("otherapp", "0001_initial"), None) graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial")) graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial")) # Use project state to make a new migration change set before = self.make_project_state([]) after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Run through arrange_for_graph changes = autodetector.arrange_for_graph(changes, graph) # Make sure there's a new name, deps match, etc. self.assertEqual(changes["testapp"][0].name, "0003_author") self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")]) self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable") self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")]) def test_trim_apps(self): "Tests that trim does not remove dependencies but does remove unwanted apps" # Use project state to make a new migration change set before = self.make_project_state([]) after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing]) autodetector = MigrationAutodetector(before, after, MigrationQuestioner(defaults={"ask_initial": True})) changes = autodetector._detect_changes() # Run through arrange_for_graph graph = MigrationGraph() changes = autodetector.arrange_for_graph(changes, graph) changes["testapp"][0].dependencies.append(("otherapp", "0001_initial")) changes = autodetector._trim_to_apps(changes, set(["testapp"])) # Make sure there's the right set of migrations self.assertEqual(changes["testapp"][0].name, "0001_initial") self.assertEqual(changes["otherapp"][0].name, "0001_initial") self.assertNotIn("thirdapp", changes) def test_custom_migration_name(self): "Tests custom naming of migrations for graph matching." 
# Make a fake graph graph = MigrationGraph() graph.add_node(("testapp", "0001_initial"), None) graph.add_node(("testapp", "0002_foobar"), None) graph.add_node(("otherapp", "0001_initial"), None) graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial")) # Use project state to make a new migration change set before = self.make_project_state([]) after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Run through arrange_for_graph migration_name = 'custom_name' changes = autodetector.arrange_for_graph(changes, graph, migration_name) # Make sure there's a new name, deps match, etc. self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name) self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")]) self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name) self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")]) def test_new_model(self): "Tests autodetection of new models" # Make state before = self.make_project_state([]) after = self.make_project_state([self.author_empty]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) # Right number of actions? migration = changes['testapp'][0] self.assertEqual(len(migration.operations), 1) # Right action? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "CreateModel") self.assertEqual(action.name, "Author") def test_old_model(self): "Tests deletion of old models" # Make state before = self.make_project_state([self.author_empty]) after = self.make_project_state([]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) # Right number of actions? migration = changes['testapp'][0] self.assertEqual(len(migration.operations), 1) # Right action? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "DeleteModel") self.assertEqual(action.name, "Author") def test_add_field(self): "Tests autodetection of new fields" # Make state before = self.make_project_state([self.author_empty]) after = self.make_project_state([self.author_name]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) # Right number of actions? migration = changes['testapp'][0] self.assertEqual(len(migration.operations), 1) # Right action? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "AddField") self.assertEqual(action.name, "name") def test_remove_field(self): "Tests autodetection of removed fields" # Make state before = self.make_project_state([self.author_name]) after = self.make_project_state([self.author_empty]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) # Right number of actions? migration = changes['testapp'][0] self.assertEqual(len(migration.operations), 1) # Right action? 
        action = migration.operations[0]
        self.assertEqual(action.__class__.__name__, "RemoveField")
        self.assertEqual(action.name, "name")

    def test_alter_field(self):
        "Tests autodetection of altered fields"
        # Make state
        before = self.make_project_state([self.author_name])
        after = self.make_project_state([self.author_name_longer])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertEqual(len(changes['testapp']), 1)
        # Right number of actions?
        migration = changes['testapp'][0]
        self.assertEqual(len(migration.operations), 1)
        # Right action?
        action = migration.operations[0]
        self.assertEqual(action.__class__.__name__, "AlterField")
        self.assertEqual(action.name, "name")

    def test_rename_field(self):
        "Tests autodetection of renamed fields"
        # Make state
        before = self.make_project_state([self.author_name])
        after = self.make_project_state([self.author_name_renamed])
        autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
        changes = autodetector._detect_changes()
        # Check
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")

    def test_rename_model(self):
        "Tests autodetection of renamed models"
        # Make state
        before = self.make_project_state([self.author_with_book, self.book])
        after = self.make_project_state([self.author_renamed_with_book, self.book_with_author_renamed])
        autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
        changes = autodetector._detect_changes()
        # Right number of migrations for model rename?
        self.assertNumberMigrations(changes, 'testapp', 1)
        # Right number of actions?
        migration = changes['testapp'][0]
        self.assertEqual(len(migration.operations), 1)
        # Right action?
        action = migration.operations[0]
        self.assertEqual(action.__class__.__name__, "RenameModel")
        self.assertEqual(action.old_name, "Author")
        self.assertEqual(action.new_name, "Writer")
        # Now that RenameModel handles related fields too, there should be
        # no AlterField for the related field.
        self.assertNumberMigrations(changes, 'otherapp', 0)

    def test_rename_model_with_renamed_rel_field(self):
        """
        Tests autodetection of renamed models while simultaneously renaming one
        of the fields that relate to the renamed model.
        """
        # Make state
        before = self.make_project_state([self.author_with_book, self.book])
        after = self.make_project_state([self.author_renamed_with_book, self.book_with_field_and_author_renamed])
        autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True, "ask_rename": True}))
        changes = autodetector._detect_changes()
        # Right number of migrations for model rename?
        self.assertNumberMigrations(changes, 'testapp', 1)
        # Right number of actions?
        migration = changes['testapp'][0]
        self.assertEqual(len(migration.operations), 1)
        # Right actions?
        action = migration.operations[0]
        self.assertEqual(action.__class__.__name__, "RenameModel")
        self.assertEqual(action.old_name, "Author")
        self.assertEqual(action.new_name, "Writer")
        # Right number of migrations for related field rename?
        # Alter is already taken care of.
        self.assertNumberMigrations(changes, 'otherapp', 1)
        # Right number of actions?
        migration = changes['otherapp'][0]
        self.assertEqual(len(migration.operations), 1)
        # Right actions?
        action = migration.operations[0]
        self.assertEqual(action.__class__.__name__, "RenameField")
        self.assertEqual(action.old_name, "author")
        self.assertEqual(action.new_name, "writer")

    def test_fk_dependency(self):
        "Tests that having a ForeignKey automatically adds a dependency"
        # Make state
        # Note that testapp (author) has no dependencies,
        # otherapp (book) depends on testapp (author),
        # thirdapp (edition) depends on otherapp (book)
        before = self.make_project_state([])
        after = self.make_project_state([self.author_name, self.book, self.edition])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertEqual(len(changes['testapp']), 1)
        self.assertEqual(len(changes['otherapp']), 1)
        self.assertEqual(len(changes['thirdapp']), 1)
        # Right number of actions?
        migration1 = changes['testapp'][0]
        self.assertEqual(len(migration1.operations), 1)
        migration2 = changes['otherapp'][0]
        self.assertEqual(len(migration2.operations), 1)
        migration3 = changes['thirdapp'][0]
        self.assertEqual(len(migration3.operations), 1)
        # Right actions?
        action = migration1.operations[0]
        self.assertEqual(action.__class__.__name__, "CreateModel")
        action = migration2.operations[0]
        self.assertEqual(action.__class__.__name__, "CreateModel")
        action = migration3.operations[0]
        self.assertEqual(action.__class__.__name__, "CreateModel")
        # Right dependencies?
        self.assertEqual(migration1.dependencies, [])
        self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
        self.assertEqual(migration3.dependencies, [("otherapp", "auto_1")])

    def test_proxy_fk_dependency(self):
        "Tests that FK dependencies still work on proxy models"
        # Make state
        # Note that testapp (author) has no dependencies,
        # thirdapp (authorproxy) depends on testapp (author),
        # otherapp (book) depends on thirdapp (authorproxy)
        before = self.make_project_state([])
        after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertNumberMigrations(changes, 'otherapp', 1)
        self.assertNumberMigrations(changes, 'thirdapp', 1)
        # Right actions?
        self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
        self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
        # Right dependencies?
        self.assertEqual(changes['testapp'][0].dependencies, [])
        self.assertEqual(changes['otherapp'][0].dependencies, [("thirdapp", "auto_1")])
        self.assertEqual(changes['thirdapp'][0].dependencies, [("testapp", "auto_1")])

    def test_same_app_no_fk_dependency(self):
        """
        Tests that a migration with a FK between two models of the same app
        does not have a dependency to itself.
        """
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.author_with_publisher, self.publisher])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
        # Right dependencies?
        self.assertEqual(changes['testapp'][0].dependencies, [])

    def test_circular_fk_dependency(self):
        """
        Tests that having a circular ForeignKey dependency automatically
        resolves the situation into 2 migrations on one side and 1 on the other.
        """
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.author_with_book, self.book, self.publisher_with_book])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertNumberMigrations(changes, 'otherapp', 2)
        # Right types?
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
        self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
        self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        # Right dependencies?
        self.assertEqual(changes['testapp'][0].dependencies, [("otherapp", "auto_1")])
        self.assertEqual(changes['otherapp'][0].dependencies, [])
        self.assertEqual(set(changes['otherapp'][1].dependencies), set([("otherapp", "auto_1"), ("testapp", "auto_1")]))

    def test_same_app_circular_fk_dependency(self):
        """
        Tests that a migration with a circular FK dependency between two models
        of the same app does not have a dependency to itself.
        """
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
        # Right dependencies?
        self.assertEqual(changes['testapp'][0].dependencies, [])

    def test_same_app_circular_fk_dependency_and_unique_together(self):
        """
        Tests that a migration with a circular FK dependency does not try to
        create the unique_together constraint before all of the required fields
        have been created. See ticket #22275.
        """
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.knight, self.rabbit])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'eggs', 1)
        self.assertOperationTypes(changes, 'eggs', 0, ["CreateModel", "CreateModel", "AlterUniqueTogether"])
        self.assertFalse("unique_together" in changes['eggs'][0].operations[0].options)
        self.assertFalse("unique_together" in changes['eggs'][0].operations[1].options)
        # Right dependencies?
        self.assertEqual(changes['eggs'][0].dependencies, [])

    def test_unique_together(self):
        "Tests unique_together detection"
        # Make state
        before = self.make_project_state([self.author_empty, self.book])
        after = self.make_project_state([self.author_empty, self.book_unique])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertEqual(len(changes['otherapp']), 1)
        # Right number of actions?
        migration = changes['otherapp'][0]
        self.assertEqual(len(migration.operations), 1)
        # Right action?
        action = migration.operations[0]
        self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
        self.assertEqual(action.name, "book")
        self.assertEqual(action.unique_together, set([("author", "title")]))

    def test_unique_together_no_changes(self):
        "Tests that unique_together doesn't generate a migration if no changes have been made"
        # Make state
        before = self.make_project_state([self.author_empty, self.book_unique])
        after = self.make_project_state([self.author_empty, self.book_unique])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertEqual(len(changes), 0)

    def test_empty_foo_together(self):
        "#23452 - Empty unique/index_together shouldn't generate a migration."
        # Explicitly testing for not specified, since this is the case after
        # a CreateModel operation w/o any definition on the original model
        model_state_not_specified = ModelState("a", "model",
            [("id", models.AutoField(primary_key=True))]
        )
        # Explicitly testing for None, since this was the issue in #23452 after
        # an AlterFooTogether operation with e.g. () as value
        model_state_none = ModelState("a", "model",
            [("id", models.AutoField(primary_key=True))],
            {"unique_together": None, "index_together": None}
        )
        # Explicitly testing for the empty set, since we now always have sets.
        # During removal (('col1', 'col2'),) --> () this becomes set([])
        model_state_empty = ModelState("a", "model",
            [("id", models.AutoField(primary_key=True))],
            {"unique_together": set(), "index_together": set()}
        )

        def test(from_state, to_state, msg):
            before = self.make_project_state([from_state])
            after = self.make_project_state([to_state])
            autodetector = MigrationAutodetector(before, after)
            changes = autodetector._detect_changes()
            if len(changes) > 0:
                ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
                self.fail('Created operation(s) %s from %s' % (ops, msg))

        tests = (
            (model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"'),
            (model_state_not_specified, model_state_none, '"not specified" to "None"'),
            (model_state_not_specified, model_state_empty, '"not specified" to "empty"'),
            (model_state_none, model_state_not_specified, '"None" to "not specified"'),
            (model_state_none, model_state_none, '"None" to "None"'),
            (model_state_none, model_state_empty, '"None" to "empty"'),
            (model_state_empty, model_state_not_specified, '"empty" to "not specified"'),
            (model_state_empty, model_state_none, '"empty" to "None"'),
            (model_state_empty, model_state_empty, '"empty" to "empty"'),
        )

        for t in tests:
            test(*t)

    def test_unique_together_ordering(self):
        "Tests that unique_together also triggers on ordering changes"
        # Make state
        before = self.make_project_state([self.author_empty, self.book_unique])
        after = self.make_project_state([self.author_empty, self.book_unique_2])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations?
        self.assertEqual(len(changes['otherapp']), 1)
        # Right number of actions?
        migration = changes['otherapp'][0]
        self.assertEqual(len(migration.operations), 1)
        # Right action?
action = migration.operations[0] self.assertEqual(action.__class__.__name__, "AlterUniqueTogether") self.assertEqual(action.name, "book") self.assertEqual(action.unique_together, set([("title", "author")])) def test_add_field_and_unique_together(self): "Tests that added fields will be created before using them in unique together" before = self.make_project_state([self.author_empty, self.book]) after = self.make_project_state([self.author_empty, self.book_unique_3]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['otherapp']), 1) # Right number of actions? migration = changes['otherapp'][0] self.assertEqual(len(migration.operations), 2) # Right actions order? action1 = migration.operations[0] action2 = migration.operations[1] self.assertEqual(action1.__class__.__name__, "AddField") self.assertEqual(action2.__class__.__name__, "AlterUniqueTogether") self.assertEqual(action2.unique_together, set([("title", "newfield")])) def test_remove_index_together(self): author_index_together = ModelState("testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)) ], {"index_together": set([("id", "name")])}) before = self.make_project_state([author_index_together]) after = self.make_project_state([self.author_name]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) migration = changes['testapp'][0] # Right number of actions? self.assertEqual(len(migration.operations), 1) # Right actions? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "AlterIndexTogether") self.assertEqual(action.index_together, set()) def test_remove_unique_together(self): author_unique_together = ModelState("testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)) ], {"unique_together": set([("id", "name")])}) before = self.make_project_state([author_unique_together]) after = self.make_project_state([self.author_name]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) migration = changes['testapp'][0] # Right number of actions? self.assertEqual(len(migration.operations), 1) # Right actions? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "AlterUniqueTogether") self.assertEqual(action.unique_together, set()) def test_proxy(self): "Tests that the autodetector correctly deals with proxy models" # First, we test adding a proxy model before = self.make_project_state([self.author_empty]) after = self.make_project_state([self.author_empty, self.author_proxy]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True}) # Now, we test turning a proxy model into a non-proxy model # It should delete the proxy then make the real one before = self.make_project_state([self.author_empty, self.author_proxy]) after = self.make_project_state([self.author_empty, self.author_proxy_notproxy]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy") self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={}) def test_unmanaged(self): "Tests that the autodetector correctly deals with managed models" # First, we test adding an unmanaged model before = self.make_project_state([self.author_empty]) after = self.make_project_state([self.author_empty, self.author_unmanaged]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged") self.assertEqual(changes['testapp'][0].operations[0].options['managed'], False) # Now, we test turning an unmanaged model into a managed model before = self.make_project_state([self.author_empty, self.author_unmanaged]) after = self.make_project_state([self.author_empty, self.author_unmanaged_managed]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel", "CreateModel"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorUnmanaged") @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser") def test_swappable(self): before = self.make_project_state([self.custom_user]) after = self.make_project_state([self.custom_user, self.author_with_custom_user]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes), 1) # Check the dependency is correct migration = changes['testapp'][0] self.assertEqual(migration.dependencies, [("__setting__", "AUTH_USER_MODEL")]) def test_add_field_with_default(self): """ Adding a field with a default should work (#22030). """ # Make state before = self.make_project_state([self.author_empty]) after = self.make_project_state([self.author_name_default]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) # Right number of actions? migration = changes['testapp'][0] self.assertEqual(len(migration.operations), 1) # Right action? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "AddField") self.assertEqual(action.name, "name") def test_custom_deconstructable(self): """ Two instances which deconstruct to the same value aren't considered a change. 
""" before = self.make_project_state([self.author_name_deconstructable_1]) after = self.make_project_state([self.author_name_deconstructable_2]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() self.assertEqual(changes, {}) def test_deconstruct_field_kwarg(self): """ Field instances are handled correctly by nested deconstruction. """ before = self.make_project_state([self.author_name_deconstructable_3]) after = self.make_project_state([self.author_name_deconstructable_4]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() self.assertEqual(changes, {}) def test_deconstruct_type(self): """ #22951 -- Uninstanted classes with deconstruct are correctly returned by deep_deconstruct during serialization. """ author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField( max_length=200, # IntegerField intentionally not instantiated. default=models.IntegerField, )) ], ) # Make state before = self.make_project_state([]) after = self.make_project_state([author]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"]) def test_replace_string_with_foreignkey(self): """ Adding an FK in the same "spot" as a deleted CharField should work. (#22300). """ # Make state before = self.make_project_state([self.author_with_publisher_string]) after = self.make_project_state([self.author_with_publisher, self.publisher]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right result? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name") self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher") def test_foreign_key_removed_before_target_model(self): """ Removing an FK and the model it targets in the same change must remove the FK field before the model to maintain consistency. """ before = self.make_project_state([self.author_with_publisher, self.publisher]) after = self.make_project_state([self.author_name]) # removes both the model and FK autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) # Right number of actions? migration = changes['testapp'][0] self.assertEqual(len(migration.operations), 2) # Right actions in right order? action = migration.operations[0] self.assertEqual(action.__class__.__name__, "RemoveField") self.assertEqual(action.name, "publisher") action = migration.operations[1] self.assertEqual(action.__class__.__name__, "DeleteModel") self.assertEqual(action.name, "Publisher") def test_add_many_to_many(self): """ Adding a ManyToManyField should not prompt for a default (#22435). 
""" class CustomQuestioner(MigrationQuestioner): def ask_not_null_addition(self, field_name, model_name): raise Exception("Should not have prompted for not null addition") before = self.make_project_state([self.author_empty, self.publisher]) # Add ManyToManyField to author model after = self.make_project_state([self.author_with_m2m, self.publisher]) autodetector = MigrationAutodetector(before, after, CustomQuestioner()) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['testapp']), 1) migration = changes['testapp'][0] # Right actions in right order? self.assertEqual(len(migration.operations), 1) action = migration.operations[0] self.assertEqual(action.__class__.__name__, "AddField") self.assertEqual(action.name, "publishers") def test_create_with_through_model(self): """ Adding a m2m with a through model and the models that use it should be ordered correctly. """ before = self.make_project_state([]) after = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, "testapp", 1) # Right actions in right order? self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"]) def test_many_to_many_removed_before_through_model(self): """ Removing a ManyToManyField and the "through" model in the same change must remove the field before the model to maintain consistency. """ before = self.make_project_state([self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution]) after = self.make_project_state([self.book_with_no_author, self.author_name]) # removes both the through model and ManyToMany autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertEqual(len(changes['otherapp']), 1) # Right number of actions? migration = changes['otherapp'][0] self.assertEqual(len(migration.operations), 4) # Right actions in right order? # The first two are because we can't optimise RemoveField # into DeleteModel reliably. action = migration.operations[0] self.assertEqual(action.__class__.__name__, "RemoveField") self.assertEqual(action.name, "author") action = migration.operations[1] self.assertEqual(action.__class__.__name__, "RemoveField") self.assertEqual(action.name, "book") action = migration.operations[2] self.assertEqual(action.__class__.__name__, "RemoveField") self.assertEqual(action.name, "authors") action = migration.operations[3] self.assertEqual(action.__class__.__name__, "DeleteModel") self.assertEqual(action.name, "Attribution") def test_many_to_many_removed_before_through_model_2(self): """ Removing a model that contains a ManyToManyField and the "through" model in the same change must remove the field before the model to maintain consistency. """ before = self.make_project_state([self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution]) after = self.make_project_state([self.author_name]) # removes both the through model and ManyToMany autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'otherapp', 1) # Right number of actions? 
self.assertOperationTypes(changes, 'otherapp', 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"]) def test_m2m_w_through_multistep_remove(self): """ A model with a m2m field that specifies a "through" model cannot be removed in the same migration as that through model as the schema will pass through an inconsistent state. The autodetector should produce two migrations to avoid this issue. """ before = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract]) after = self.make_project_state([self.publisher]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, "testapp", 1) # Right actions in right order? self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "RemoveField", "DeleteModel"]) # Actions touching the right stuff? self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers") self.assertOperationAttributes(changes, "testapp", 0, 1, name="author") self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 3, name="publisher") self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract") def test_non_circular_foreignkey_dependency_removal(self): """ If two models with a ForeignKey from one to the other are removed at the same time, the autodetector should remove them in the correct order. """ before = self.make_project_state([self.author_with_publisher, self.publisher_with_author]) after = self.make_project_state([]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, "testapp", 1) # Right actions in right order? self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"]) def test_alter_model_options(self): """ Changing a model's options should make a change """ before = self.make_project_state([self.author_empty]) after = self.make_project_state([self.author_with_options]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, "testapp", 1) # Right actions in right order? self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"]) # Changing them back to empty should also make a change before = self.make_project_state([self.author_with_options]) after = self.make_project_state([self.author_empty]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"]) def test_alter_model_options_proxy(self): """ Changing a proxy model's options should also make a change """ before = self.make_project_state([self.author_proxy, self.author_empty]) after = self.make_project_state([self.author_proxy_options, self.author_empty]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, "testapp", 1) # Right actions in right order? 
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"]) def test_set_alter_order_with_respect_to(self): "Tests that setting order_with_respect_to adds a field" # Make state before = self.make_project_state([self.book, self.author_with_book]) after = self.make_project_state([self.book, self.author_with_book_order_wrt]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book") def test_add_alter_order_with_respect_to(self): """ Tests that setting order_with_respect_to when adding the FK too does things in the right order. """ # Make state before = self.make_project_state([self.author_name]) after = self.make_project_state([self.book, self.author_with_book_order_wrt]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book") def test_remove_alter_order_with_respect_to(self): """ Tests that removing order_with_respect_to when removing the FK too does things in the right order. """ # Make state before = self.make_project_state([self.book, self.author_with_book_order_wrt]) after = self.make_project_state([self.author_name]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None) self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book") def test_add_model_order_with_respect_to(self): """ Tests that setting order_with_respect_to when adding the whole model does things in the right order. """ # Make state before = self.make_project_state([]) after = self.make_project_state([self.book, self.author_with_book_order_wrt]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"]) self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book") # Make sure the _order field is not in the CreateModel fields self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields]) def test_swappable_first_inheritance(self): """ Tests that swappable models get their CreateModel first. """ # Make state before = self.make_project_state([]) after = self.make_project_state([self.custom_user, self.aardvark]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? 
self.assertNumberMigrations(changes, 'thirdapp', 1) self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser") self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark") @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser") def test_swappable_first_setting(self): """ Tests that swappable models get their CreateModel first. """ # Make state before = self.make_project_state([]) after = self.make_project_state([self.custom_user_no_inherit, self.aardvark]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'thirdapp', 1) self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser") self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark") def test_bases_first(self): """ Tests that bases of other models come first. """ # Make state before = self.make_project_state([]) after = self.make_project_state([self.aardvark_based_on_author, self.author_name]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark") def test_proxy_bases_first(self): """ Tests that bases of proxies come first. """ # Make state before = self.make_project_state([]) after = self.make_project_state([self.author_empty, self.author_proxy, self.author_proxy_proxy]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy") self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy") def test_pk_fk_included(self): """ Tests that a relation used as the primary key is kept as part of CreateModel. """ # Make state before = self.make_project_state([]) after = self.make_project_state([self.aardvark_pk_fk_author, self.author_name]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark") def test_first_dependency(self): """ Tests that a dependency to an app with no migrations uses __first__. """ # Load graph loader = MigrationLoader(connection) # Make state before = self.make_project_state([]) after = self.make_project_state([self.book_migrations_fk]) after.real_apps = ["migrations"] autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes(graph=loader.graph) # Right number of migrations? 
self.assertNumberMigrations(changes, 'otherapp', 1) self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"]) self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book") # Right dependencies? self.assertEqual(changes['otherapp'][0].dependencies, [("migrations", "__first__")]) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_last_dependency(self): """ Tests that a dependency to an app with existing migrations uses the last migration of that app. """ # Load graph loader = MigrationLoader(connection) # Make state before = self.make_project_state([]) after = self.make_project_state([self.book_migrations_fk]) after.real_apps = ["migrations"] autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes(graph=loader.graph) # Right number of migrations? self.assertNumberMigrations(changes, 'otherapp', 1) self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"]) self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book") # Right dependencies? self.assertEqual(changes['otherapp'][0].dependencies, [("migrations", "0002_second")]) def test_alter_fk_before_model_deletion(self): """ Tests that ForeignKeys are altered _before_ the model they used to refer to are deleted. """ # Make state before = self.make_project_state([self.author_name, self.publisher_with_author]) after = self.make_project_state([self.aardvark_testapp, self.publisher_with_aardvark_author]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark") self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author") self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author") def test_fk_dependency_other_app(self): """ Tests that ForeignKeys correctly depend on other apps' models (#23100) """ # Make state before = self.make_project_state([self.author_name, self.book]) after = self.make_project_state([self.author_with_book, self.book]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'testapp', 1) self.assertOperationTypes(changes, 'testapp', 0, ["AddField"]) self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book") self.assertEqual(changes['testapp'][0].dependencies, [("otherapp", "__first__")]) def test_circular_dependency_mixed_addcreate(self): """ Tests that the dependency resolver knows to put all CreateModel before AddField and not become unsolvable (#23315) """ address = ModelState("a", "Address", [ ("id", models.AutoField(primary_key=True)), ("country", models.ForeignKey("b.DeliveryCountry")), ]) person = ModelState("a", "Person", [ ("id", models.AutoField(primary_key=True)), ]) apackage = ModelState("b", "APackage", [ ("id", models.AutoField(primary_key=True)), ("person", models.ForeignKey("a.Person")), ]) country = ModelState("b", "DeliveryCountry", [ ("id", models.AutoField(primary_key=True)), ]) # Make state before = self.make_project_state([]) after = self.make_project_state([address, person, apackage, country]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? 
self.assertNumberMigrations(changes, 'a', 2) self.assertNumberMigrations(changes, 'b', 1) self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"]) self.assertOperationTypes(changes, 'a', 1, ["AddField"]) self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"]) @override_settings(AUTH_USER_MODEL="a.Tenant") def test_circular_dependency_swappable(self): """ Tests that the dependency resolver knows to explicitly resolve swappable models (#23322) """ tenant = ModelState("a", "Tenant", [ ("id", models.AutoField(primary_key=True)), ("primary_address", models.ForeignKey("b.Address"))], bases=(AbstractBaseUser, ) ) address = ModelState("b", "Address", [ ("id", models.AutoField(primary_key=True)), ("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)), ]) # Make state before = self.make_project_state([]) after = self.make_project_state([address, tenant]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'a', 2) self.assertNumberMigrations(changes, 'b', 1) self.assertOperationTypes(changes, 'a', 0, ["CreateModel"]) self.assertOperationTypes(changes, 'a', 1, ["AddField"]) self.assertOperationTypes(changes, 'b', 0, ["CreateModel"]) self.assertEqual(changes['a'][0].dependencies, []) self.assertEqual(set(changes['a'][1].dependencies), set([('a', 'auto_1'), ('b', 'auto_1')])) self.assertEqual(changes['b'][0].dependencies, [('__setting__', 'AUTH_USER_MODEL')]) @override_settings(AUTH_USER_MODEL="b.Tenant") def test_circular_dependency_swappable2(self): """ Tests that the dependency resolver knows to explicitly resolve swappable models but with the swappable not being the first migrated model (#23322) """ address = ModelState("a", "Address", [ ("id", models.AutoField(primary_key=True)), ("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)), ]) tenant = ModelState("b", "Tenant", [ ("id", models.AutoField(primary_key=True)), ("primary_address", models.ForeignKey("a.Address"))], bases=(AbstractBaseUser, ) ) # Make state before = self.make_project_state([]) after = self.make_project_state([address, tenant]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'a', 2) self.assertNumberMigrations(changes, 'b', 1) self.assertOperationTypes(changes, 'a', 0, ["CreateModel"]) self.assertOperationTypes(changes, 'a', 1, ["AddField"]) self.assertOperationTypes(changes, 'b', 0, ["CreateModel"]) self.assertEqual(changes['a'][0].dependencies, []) self.assertEqual(set(changes['a'][1].dependencies), set([('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')])) self.assertEqual(changes['b'][0].dependencies, [('a', 'auto_1')]) @override_settings(AUTH_USER_MODEL="a.Person") def test_circular_dependency_swappable_self(self): """ Tests that the dependency resolver knows to explicitly resolve swappable models (#23322) """ person = ModelState("a", "Person", [ ("id", models.AutoField(primary_key=True)), ("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, related_name='children')) ]) # Make state before = self.make_project_state([]) after = self.make_project_state([person]) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number of migrations? self.assertNumberMigrations(changes, 'a', 1) self.assertOperationTypes(changes, 'a', 0, ["CreateModel"]) self.assertEqual(changes['a'][0].dependencies, [])
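# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original test
# suite): every test above follows the same pattern, so the autodetector can
# be driven directly by diffing two ProjectStates. The app label "exampleapp"
# and the settings.configure()/django.setup() calls are assumptions made so
# the snippet can run standalone; inside a configured Django project neither
# would be needed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import django
    if not settings.configured:
        settings.configure()
        django.setup()
    before_state = ProjectState()
    after_state = ProjectState()
    after_state.add_model_state(ModelState("exampleapp", "Author", [
        ("id", models.AutoField(primary_key=True)),
        ("name", models.CharField(max_length=200)),
    ]).clone())
    detected = MigrationAutodetector(before_state, after_state)._detect_changes()
    for label, migrations in sorted(detected.items()):
        for migration in migrations:
            # Expect a single CreateModel operation for "Author".
            print(label, [op.__class__.__name__ for op in migration.operations])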
# -*- coding: utf-8 -*-

"""
Markov Decision Process solver.
"""

__author__ = "Julian Jara-Ettinger"
__license__ = "MIT"

import numpy as np
import math
import random


class MDP(object):

    def __init__(self, S=[], A=[], T=[], R=[], gamma=0.95, tau=0.01):
        """
        Markov Decision Process (MDP) class.

        Args:
            S (list): List of states
            A (list): List of actions
            T (matrix): Transition matrix where T[SO,A,SF] is the probability
                        of moving from SO to SF after taking action A
            R (matrix): Reward function where R[A,S] is the reward for taking
                        action A in state S
            gamma (float): Future discount
            tau (float): Softmax parameter

        Returns:
            MDP object
        """
        self.S = S
        self.A = A
        self.T = T
        self.R = R
        self.gamma = gamma
        self.tau = tau
        self.values = np.zeros((1, len(S)))
        # Where we'll store the softmaxed probabilities
        self.policy = np.zeros((len(A), len(S)))

    def ValueIteration(self, epsilon=0.0001):
        """
        Perform value iteration on MDP.

        Calculates each state's value and saves them in MDP's values attribute

        Args:
            epsilon (float): Convergence parameter

        Returns:
            None
        """
        self.values = np.zeros(self.values.shape)
        while True:
            V2 = self.values.copy()
            for i in range(0, len(self.S)):
                # Discounted expected value of each action's successor states.
                prod = self.gamma * \
                    (np.mat(self.T[i, :, :]) * np.mat(V2.transpose()))
                # Bellman update: best immediate reward plus discounted future.
                self.values[0, i] = max(prod[j] + self.R[j, i]
                                        for j in range(len(self.A)))
            if (self.values - V2).max() <= epsilon:
                break

    def Validate(self):
        """
        Check that MDP object is correct.

        Args:
            None
        """
        print("Validating MDP...")
        dims = self.T.shape
        states = len(self.S)
        actions = len(self.A)
        if dims[0] != dims[2]:
            print("ERROR: Transition matrix is not square. MDP-001")
            return 0
        if states != dims[0]:
            print("ERROR: Transition matrix does not match number of states. MDP-002")
            return 0
        if list(self.S) != list(range(states)):
            print("ERROR: States are not correctly numbered. MDP-003")
            return 0
        if list(self.A) != list(range(actions)):
            print("ERROR: Actions are not correctly numbered. MDP-004")
            return 0
        if dims[1] != actions:
            print("ERROR: Transition matrix does not match number of actions. MDP-005")
            return 0
        if (self.gamma >= 1) or (self.gamma <= 0):
            print("ERROR: Invalid value of gamma. MDP-006")
            return 0
        if (self.tau is not None) and (self.tau <= 0):
            print("ERROR: Invalid value of tau. MDP-009")
            return 0
        # Check that every transition vector adds up to 1
        res = (np.ndarray.flatten(np.sum(self.T, axis=2)) == 1)
        if len(res) != sum(res):
            print("ERROR: Transition matrix rows do not add up to 1. MDP-007")
            return 0
        return 1

    def BuildPolicy(self, Softmax=True):
        """
        Build optimal policy

        Calculates MDPs optimal policy

        Args:
            Softmax (bool): Indicates if actions are softmaxed.

        Returns:
            None
        """
        # Build a policy using the results from value iteration
        for i in range(0, len(self.S)):
            options = np.mat(
                self.T[i, :, :]) * np.mat(self.values.transpose())
            options = options.tolist()
            # Shift by the largest value to prevent softmax from overflowing
            maxval = max(options)[0]
            options = [options[j][0] - maxval for j in range(len(options))]
            # Softmax the policy
            if Softmax:
                try:
                    options = [math.exp(options[j] / self.tau)
                               for j in range(len(options))]
                except OverflowError:
                    print("ERROR: Failed to softmax policy. MDP-008")
                    raise
                # If all actions have no value then set a uniform distribution
                if sum(options) == 0:
                    self.policy[:, i] = [
                        1.0 / len(options) for j in range(len(options))]
                else:
                    self.policy[:, i] = [
                        options[j] / sum(options) for j in range(len(options))]
            else:
                totalchoices = sum([options[optloop] == max(options)
                                    for optloop in range(len(options))])
                self.policy[:, i] = [(1.0 / totalchoices
                                      if options[optloop] == max(options) else 0)
                                     for optloop in range(len(options))]

    def GetStates(self, StartingPoint, ActionSequence):
        """
        Produce the sequence of states with highest likelihood given a
        starting point and sequence of actions.

        Args:
            StartingPoint (int): State number where agent begins.
            ActionSequence (list): List of indices of actions.

        Returns:
            List of state numbers
        """
        StateSequence = [0] * (len(ActionSequence) + 1)
        StateSequence[0] = StartingPoint
        for i in range(len(ActionSequence)):
            StateSequence[i + 1] = (
                self.T[StateSequence[i], ActionSequence[i], :]).argmax()
        return StateSequence

    def Run(self, State, Softmax=False, Simple=False):
        """
        Sample an action from the optimal policy given the state.

        Note that if Softmax is set to True then Simple is ignored (see below).

        Args:
            State (int): State number where agent begins.
            Softmax (bool): Simulate with softmaxed policy?
            Simple (bool): Some states have various actions all with an
                        equally high value. When this happens, Run() randomly
                        selects one of these actions. If Simple is set to
                        True, it selects the first highest-value action.

        Returns:
            List with the sampled end state and the chosen action
        """
        if Softmax:
            # If softmaxing then select a random sample
            ActSample = random.uniform(0, 1)
            ActionProbs = self.policy[:, State]
            ActionChoice = -1
            for j in range(len(ActionProbs)):
                if ActSample < ActionProbs[j]:
                    ActionChoice = j
                    break
                else:
                    ActSample -= ActionProbs[j]
        else:
            maxval = max(self.policy[:, State])
            maxindices = [i for i, j in enumerate(self.policy[:, State])
                          if j == maxval]
            if Simple:
                ActionChoice = maxindices[0]
            else:
                ActionChoice = random.choice(maxindices)
        # Now find the next state
        EndStates = self.T[State, ActionChoice, :]
        StateSample = random.uniform(0, 1)
        for j in range(len(EndStates)):
            if StateSample < EndStates[j]:
                EndState = j
                break
            else:
                StateSample -= EndStates[j]
        return [EndState, ActionChoice]

    def Display(self, Full=False):
        """
        Print object attributes.

        .. Internal function::
           This function is for internal use only.

        Args:
            Full (bool): When set to False, function only prints attribute
                        names. Otherwise, it also prints its values.

        Returns:
            standard output summary
        """
        if Full:
            for (attr, value) in vars(self).items():
                print(attr, ': ', value)
        else:
            for (attr, value) in vars(self).items():
                print(attr)
import os
import time
import umsgpack
import datetime
import threading
import btctxstore
import storjnode
from storjnode.util import safe_log_var
from storjnode.common import THREAD_SLEEP
from kademlia.network import Server as KademliaServer
from kademlia.storage import ForgetfulStorage
from kademlia.node import Node as KademliaNode
from kademlia.routing import TableTraverser
from storjnode.network.protocol import Protocol
from twisted.internet import defer
from twisted.internet.task import LoopingCall
from crochet import run_in_reactor
from storjnode.network.messages.base import MAX_MESSAGE_DATA


if os.environ.get("STORJNODE_QUERY_TIMEOUT"):
    QUERY_TIMEOUT = float(os.environ.get("STORJNODE_QUERY_TIMEOUT"))
else:
    QUERY_TIMEOUT = 5.0  # default seconds
WALK_TIMEOUT = QUERY_TIMEOUT * 24.0

_log = storjnode.log.getLogger(__name__)


class MessageRelayer(object):

    def __init__(self, server, dest, hop_limit, message):
        self.server = server
        self.node = self.server.node
        self.dest = KademliaNode(dest)
        self.hop_limit = hop_limit
        self.message = message
        self.nearest = None

    @run_in_reactor
    def start(self):
        self.nearest = self.server.protocol.router.findNeighbors(
            self.dest, exclude=self.server.node
        )
        txt = "{1}: Relaying to nearest peers: {0}"
        _log.debug(txt.format(repr(self.nearest), self.server.get_address()))
        self.nearest.reverse()  # reverse so you can pop the next
        self.attempt_relay([True, None])

    def __call__(self, result):
        self.attempt_relay(result)

    def attempt_relay(self, result):
        success = bool(result[0] and result[1])
        dest_address = storjnode.util.node_id_to_address(self.dest.id)
        if success:
            txt = "{1}: Successfully relayed message for {0}"
            _log.debug(txt.format(dest_address, self.server.get_address()))
            return  # relay only to nearest peer, avoid amplification attacks!
        elif not self.nearest:
            txt = "{1}: Failed to relay message for {0}"
            _log.debug(txt.format(dest_address, self.server.get_address()))
            return

        relay_node = self.nearest.pop()
        address = storjnode.util.node_id_to_address(relay_node.id)

        # do not relay away from node
        if self.dest.distanceTo(self.node) <= self.dest.distanceTo(relay_node):
            txt = "{1}: Aborting relay attempt, {0} farther than self."
            _log.debug(txt.format(address, self.server.get_address()))
            return

        # attempt to relay message
        txt = "{1}: Attempting to relay message for {0}"
        _log.debug(txt.format(address, self.server.get_address()))
        self.server.protocol.callRelayMessage(
            relay_node, self.dest.id, self.hop_limit, self.message
        ).addCallback(self)


class Server(KademliaServer):

    def __init__(self, key, port, ksize=20, alpha=3, storage=None,
                 max_messages=1024, default_hop_limit=64,
                 refresh_neighbours_interval=WALK_TIMEOUT):
        """
        Create a server instance. This will start listening on the given port.

        Args:
            key (str): Bitcoin wif/hwif for auth, encryption and node id.
            ksize (int): The k parameter from the kademlia paper.
            alpha (int): The alpha parameter from the kademlia paper.
            storage: implements :interface:`~kademlia.storage.IStorage`
            refresh_neighbours_interval (float): Auto refresh neighbours.
        """
        self.port = port
        self._default_hop_limit = default_hop_limit
        self._refresh_neighbours_interval = refresh_neighbours_interval
        self._cached_address = None
        self.port_handler = None

        self.btctxstore = btctxstore.BtcTxStore(testnet=False)

        # allow hwifs
        is_hwif = self.btctxstore.validate_wallet(key)
        self.key = self.btctxstore.get_key(key) if is_hwif else key

        # XXX kademlia.network.Server.__init__ can't use super because Protocol
        # passing the protocol class should be added upstream
        self.ksize = ksize
        self.alpha = alpha
        self.log = storjnode.log.getLogger("kademlia.network")
        self.log.setLevel(60)
        self.storage = storage or ForgetfulStorage()
        self.node = KademliaNode(self.get_id())
        self.protocol = Protocol(
            self.node, self.storage, ksize, max_messages=max_messages,
            max_hop_limit=self._default_hop_limit
        )
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)

        self._start_threads()

    def _start_threads(self):
        # setup relay message thread
        self._relay_thread_stop = False
        self._relay_thread = threading.Thread(target=self._relay_loop)
        self._relay_thread.start()

        # setup refresh neighbours thread
        if self._refresh_neighbours_interval > 0.0:
            self._refresh_thread_stop = False
            self._refresh_thread = threading.Thread(target=self._refresh_loop)
            self._refresh_thread.start()

    def set_port_handler(self, port_handler):
        self.port_handler = port_handler

    def stop(self):
        if self._refresh_neighbours_interval > 0.0:
            self._refresh_thread_stop = True
            self._refresh_thread.join()

        self._relay_thread_stop = True
        self._relay_thread.join()

        # disconnect from port and stop properly
        if self.port_handler is not None:
            self.port_handler.stopListening()

    @run_in_reactor
    def refresh_neighbours(self):
        _log.debug("Refreshing neighbours ...")
        self.bootstrap(self.bootstrappableNeighbors())

    def get_id(self):
        return storjnode.util.address_to_node_id(self.get_address())

    def get_address(self):
        if self._cached_address is not None:
            return self._cached_address
        self._cached_address = self.btctxstore.get_address(self.key)
        return self._cached_address

    def get_known_peers(self):
        """Returns list of known nodes."""
        return TableTraverser(self.protocol.router, self.node)

    def get_neighbours(self):
        return self.protocol.router.findNeighbors(self.node, exclude=self.node)

    def has_messages(self):
        return self.protocol.has_messages()

    def get_messages(self):
        return self.protocol.get_messages()

    def relay_message(self, nodeid, message):
        """Send relay message to a node.

        Queues a message to be relayed across the network. Relay messages are
        sent to the node nearest the receiver in the routing table that
        accepts the relay message. This continues until it reaches the
        destination or the nearest node to the receiver is reached.

        Because messages are always relayed only to reachable nodes in the
        current routing table, there is a fair chance nodes behind a NAT can
        be reached if it is connected to the network.

        Args:
            nodeid: 160bit nodeid of the receiver as bytes
            message: u-msgpack-python serializable message data

        Returns:
            True if message was added to relay queue, otherwise False.
        """

        # check max message size
        packed_message = umsgpack.packb(message)
        if len(packed_message) > MAX_MESSAGE_DATA:
            raise Exception("Message too large {0} > {1}: {2}".format(
                len(packed_message), MAX_MESSAGE_DATA, repr(message)
            ))
        message = umsgpack.unpackb(packed_message)  # sanitize abstract types

        if nodeid == self.node.id:
            _log.debug("Adding message to self to received queue!")
            return self.protocol.queue_received_message(message)
        else:
            txt = "Queuing relay message for %s: %s"
            address = storjnode.util.node_id_to_address(nodeid)
            if type(message) in (type(b""), type(u"")):
                safe_msg = safe_log_var(message)
            else:
                safe_msg = message
            _log.debug(txt % (address, safe_msg))
            return self.protocol.queue_relay_message({
                "dest": nodeid,
                "message": message,
                "hop_limit": self._default_hop_limit
            })

    def _refresh_loop(self):
        last_refresh = datetime.datetime.now()
        delta = datetime.timedelta(seconds=self._refresh_neighbours_interval)
        while not self._refresh_thread_stop:
            if (datetime.datetime.now() - last_refresh) > delta:
                self.refresh_neighbours()
                last_refresh = datetime.datetime.now()
            time.sleep(THREAD_SLEEP)

    def _relay_loop(self):
        while not self._relay_thread_stop:
            q = self.protocol.messages_relay
            for entry in storjnode.util.empty_queue(q):
                message_relayer = MessageRelayer(self, **entry)
                message_relayer.start()
            time.sleep(THREAD_SLEEP)

    def get_transport_info(self, unl=None):

        def handle(results):
            results = filter(lambda r: r[0], results)  # filter successful
            if not results:
                _log.warning("No successful stun!")
                return None

            # FIXME check all entries as some nodes may be on the local net
            result = results[0][1]
            if not result:
                _log.warning("No stun result!")
                return None
            wan = (result[0], result[1])
            lan = (storjnode.util.get_inet_facing_ip(), self.port)
            transport_info = {
                "wan": wan,
                "lan": lan,
                "unl": unl,
                "is_public": wan == lan
            }
            return transport_info

        ds = []
        for neighbor in self.bootstrappableNeighbors():
            ds.append(self.protocol.stun(neighbor))
        return defer.gatherResults(ds).addCallback(handle)
# -*- coding: utf-8 -*- """ Django settings for distadmin project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os from os.path import join # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings try: from S3 import CallingFormat AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN except ImportError: # TODO: Fix this where even if in Dev this class is called. pass from configurations import Configuration, values BASE_DIR = os.path.dirname(os.path.dirname(__file__)) class Common(Configuration): ########## APP CONFIGURATION DJANGO_APPS = ( # Default Django apps: 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Useful template tags: # 'django.contrib.humanize', # Admin 'django.contrib.admin', ) THIRD_PARTY_APPS = ( 'south', # Database migration helpers: 'crispy_forms', # Form layouts 'avatar', # for user avatars ) # Apps specific for this project go here. LOCAL_APPS = ( 'users', # custom users app # Your stuff: custom apps go here ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS INSTALLED_APPS += ( # Needs to come last for now because of a weird edge case between # South and allauth 'allauth', # registration 'allauth.account', # registration 'allauth.socialaccount', # registration ) ########## END APP CONFIGURATION ########## MIDDLEWARE CONFIGURATION MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ########## END MIDDLEWARE CONFIGURATION ########## DEBUG # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = values.BooleanValue(True) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG ########## END DEBUG ########## SECRET CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. # In production, this is changed to a values.SecretValue() setting SECRET_KEY = "CHANGEME!!!" 
########## END SECRET CONFIGURATION ########## FIXTURE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( join(BASE_DIR, 'fixtures'), ) ########## END FIXTURE CONFIGURATION ########## EMAIL CONFIGURATION EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend') ########## END EMAIL CONFIGURATION ########## MANAGER CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = ( ('Peter C', 'peter@drifty.com'), ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS ########## END MANAGER CONFIGURATION ########## DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = values.DatabaseURLValue('postgres://localhost/distadmin') ########## END DATABASE CONFIGURATION ########## CACHING # Do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows. # memcacheify is what's used in Production CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': '' } } ########## END CACHING ########## GENERAL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone TIME_ZONE = 'America/Los_Angeles' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True ########## END GENERAL CONFIGURATION ########## TEMPLATE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', "allauth.account.context_processors.account", "allauth.socialaccount.context_processors.socialaccount", 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.request', # Your stuff: custom template context processers go here ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_DIRS = ( join(BASE_DIR, 'templates'), ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs CRISPY_TEMPLATE_PACK = 'bootstrap3' ########## END TEMPLATE CONFIGURATION ########## STATIC FILE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles') # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = ( join(BASE_DIR, 'static'), ) # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) ########## END STATIC FILE CONFIGURATION ########## MEDIA CONFIGURATION # See: 
https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = join(BASE_DIR, 'media') # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' ########## END MEDIA CONFIGURATION ########## URL Configuration ROOT_URLCONF = 'config.urls' # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = 'config.wsgi.application' ########## End URL Configuration ########## AUTHENTICATION CONFIGURATION AUTHENTICATION_BACKENDS = ( "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ) # Some really nice defaults ACCOUNT_AUTHENTICATION_METHOD = "username" ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = "mandatory" ########## END AUTHENTICATION CONFIGURATION ########## Custom user app defaults # Select the correct user model AUTH_USER_MODEL = "users.User" LOGIN_REDIRECT_URL = "users:redirect" ########## END Custom user app defaults ########## SLUGLIFIER AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify" ########## END SLUGLIFIER ########## LOGGING CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } ########## END LOGGING CONFIGURATION ########## Your common stuff: Below this line define 3rd party libary settings class Local(Common): ########## INSTALLED_APPS INSTALLED_APPS = Common.INSTALLED_APPS ########## END INSTALLED_APPS ########## Mail settings EMAIL_HOST = "localhost" EMAIL_PORT = 1025 EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend') ########## End mail settings ########## django-debug-toolbar MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',) INSTALLED_APPS += ('debug_toolbar',) INTERNAL_IPS = ('127.0.0.1',) DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False, 'SHOW_TEMPLATE_CONTEXT': True, } ########## end django-debug-toolbar ########## Your local stuff: Below this line define 3rd party libary settings class Production(Common): ########## INSTALLED_APPS INSTALLED_APPS = Common.INSTALLED_APPS ########## END INSTALLED_APPS ########## SECRET KEY SECRET_KEY = values.SecretValue() ########## END SECRET KEY ########## django-secure INSTALLED_APPS += ("djangosecure", ) # set this to 60 seconds and then to 518400 when you can prove it works SECURE_HSTS_SECONDS = 60 SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True) SECURE_FRAME_DENY = values.BooleanValue(True) SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True) SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True) SESSION_COOKIE_SECURE = values.BooleanValue(False) SESSION_COOKIE_HTTPONLY = values.BooleanValue(True) SECURE_SSL_REDIRECT = values.BooleanValue(True) ########## end django-secure ########## SITE CONFIGURATION # Hosts/domain names that are valid for this site # See 
https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ["*"] ########## END SITE CONFIGURATION INSTALLED_APPS += ("gunicorn", ) ########## STORAGE CONFIGURATION # See: http://django-storages.readthedocs.org/en/latest/index.html INSTALLED_APPS += ( 'storages', ) # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage' # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings AWS_ACCESS_KEY_ID = values.SecretValue() AWS_SECRET_ACCESS_KEY = values.SecretValue() AWS_STORAGE_BUCKET_NAME = values.SecretValue() AWS_AUTO_CREATE_BUCKET = True AWS_QUERYSTRING_AUTH = False # AWS cache settings, don't change unless you know what you're doing: AWS_EXPIREY = 60 * 60 * 24 * 7 AWS_HEADERS = { 'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY, AWS_EXPIREY) } # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME ########## END STORAGE CONFIGURATION ########## EMAIL DEFAULT_FROM_EMAIL = values.Value( 'distadmin <distadmin-noreply@example.com>') EMAIL_HOST = values.Value('smtp.sendgrid.com') EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD") EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME") EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT") EMAIL_SUBJECT_PREFIX = values.Value('[distadmin] ', environ_name="EMAIL_SUBJECT_PREFIX") EMAIL_USE_TLS = True SERVER_EMAIL = EMAIL_HOST_USER ########## END EMAIL ########## TEMPLATE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_LOADERS = ( ('django.template.loaders.cached.Loader', ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', )), ) ########## END TEMPLATE CONFIGURATION ########## CACHING # Only do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows. CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211") ########## END CACHING ########## Your production stuff: Below this line define 3rd party libary settings
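Outside of the settings classes above, django-configurations selects which class (Local or Production) to load via an environment variable. A minimal manage.py sketch follows; the settings module path "config.settings" is an assumption inferred from ROOT_URLCONF and WSGI_APPLICATION above, so adjust it to wherever this module actually lives.

#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this settings module (assumed path).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
    # Pick the Configuration subclass to use: "Local" or "Production".
    os.environ.setdefault("DJANGO_CONFIGURATION", "Local")

    # django-configurations provides its own management entry point.
    from configurations.management import execute_from_command_line

    execute_from_command_line(sys.argv)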
from django.conf import settings from django.http import JsonResponse from rest_framework.viewsets import ViewSet from rest_framework.parsers import JSONParser from rest_framework.decorators import detail_route, parser_classes from rest_api.tools import set_ikeys, split_cols from rest_api.exceptions import ( JasminSyntaxError, JasminError, ActionFailed, ObjectNotFoundError, UnknownError, ) STANDARD_PROMPT = settings.STANDARD_PROMPT INTERACTIVE_PROMPT = settings.INTERACTIVE_PROMPT class SMPPCCMViewSet(ViewSet): "Viewset for managing SMPP Client Connectors" lookup_field = 'cid' def get_smppccm(self, telnet, cid, silent=False): #Some of this could be abstracted out - similar pattern in users.py telnet.sendline('smppccm -s ' + cid) matched_index = telnet.expect([ r'.+Unknown connector:.*' + STANDARD_PROMPT, r'.+Usage:.*' + STANDARD_PROMPT, r'(.+)\n' + STANDARD_PROMPT, ]) if matched_index != 2: if silent: return else: raise ObjectNotFoundError('Unknown connector: %s' % cid) result = telnet.match.group(1) smppccm = {} for line in result.splitlines(): d = [x for x in line.split() if x] if len(d) == 2: smppccm[d[0]] = d[1] return smppccm def get_connector_list(self, telnet): telnet.sendline('smppccm -l') telnet.expect([r'(.+)\n' + STANDARD_PROMPT]) result = telnet.match.group(0).strip().replace("\r", '').split("\n") if len(result) < 3: return [] return split_cols(result[2:-2]) def simple_smppccm_action(self, telnet, action, cid): telnet.sendline('smppccm -%s %s' % (action, cid)) matched_index = telnet.expect([ r'.+Successfully(.+)' + STANDARD_PROMPT, r'.+Unknown connector: (.+)' + STANDARD_PROMPT, r'(.*)' + STANDARD_PROMPT, ]) if matched_index == 0: telnet.sendline('persist\n') return JsonResponse({'name': cid}) elif matched_index == 1: raise ObjectNotFoundError('Unknown SMPP Connector: %s' % cid) else: raise ActionFailed(telnet.match.group(1)) def list(self, request): """List SMPP Client Connectors. No parameters Differs from slightly from telent CLI names and values: 1. the "service" column is called "status" 2. the cid is the full connector id of the form smpps(cid) """ telnet = request.telnet connector_list = self.get_connector_list(telnet) connectors = [] for raw_data in connector_list: if raw_data[0][0] == '#': cid = raw_data[0][1:] connector = self.get_smppccm(telnet, cid, True) connector.update( cid=cid, status=raw_data[1], session=raw_data[2], starts=raw_data[3], stops=raw_data[4] ) connectors.append(connector) return JsonResponse({'connectors': connectors}) def retrieve(self, request, cid): """Retreive data for one connector Required parameter: cid (connector id)""" telnet = request.telnet connector = self.get_smppccm(telnet, cid, silent=False) connector_list = self.get_connector_list(telnet) list_data = next( (raw_data for raw_data in connector_list if raw_data[0] == '#' + cid), None ) connector.update( cid=cid, status=list_data[1], session=list_data[2], starts=list_data[3], stops=list_data[4] ) return JsonResponse({'connector': connector}) def create(self, request): """Create an SMPP Client Connector. 
Required parameter: cid (connector id) --- # YAML omit_serializer: true parameters: - name: cid description: Connector identifier required: true type: string paramType: form """ telnet = request.telnet telnet.sendline('smppccm -a') updates = request.data for k, v in updates.items(): if not ((type(updates) is dict) and (len(updates) >= 1)): raise JasminSyntaxError('updates should be a a key value array') telnet.sendline("%s %s" % (k, v)) matched_index = telnet.expect([ r'.*(Unknown SMPPClientConfig key:.*)' + INTERACTIVE_PROMPT, r'.*(Error:.*)' + STANDARD_PROMPT, r'.*' + INTERACTIVE_PROMPT, r'.+(.*)(' + INTERACTIVE_PROMPT + '|' + STANDARD_PROMPT + ')', ]) if matched_index != 2: raise JasminSyntaxError( detail=" ".join(telnet.match.group(1).split())) telnet.sendline('ok') telnet.sendline('persist\n') telnet.expect(r'.*' + STANDARD_PROMPT) return JsonResponse({'cid': request.data['cid']}) def destroy(self, request, cid): """Delete an smpp connector. One parameter required, the connector identifier HTTP codes indicate result as follows - 200: successful deletion - 404: nonexistent group - 400: other error """ return self.simple_smppccm_action(request.telnet, 'r', cid) @parser_classes((JSONParser,)) def partial_update(self, request, cid): """Update some SMPP connector attributes JSON requests only. The updates parameter is a key value array --- # YAML omit_serializer: true parameters: - name: updates description: Items to update required: true type: array paramType: body """ telnet = request.telnet telnet.sendline('smppccm -u ' + cid) matched_index = telnet.expect([ r'.*Updating connector(.*)' + INTERACTIVE_PROMPT, r'.*Unknown connector: (.*)' + STANDARD_PROMPT, r'.+(.*)(' + INTERACTIVE_PROMPT + '|' + STANDARD_PROMPT + ')', ]) if matched_index == 1: raise UnknownError(detail='Unknown connector:' + cid) if matched_index != 0: raise JasminError(detail=" ".join(telnet.match.group(0).split())) updates = request.data for k, v in updates.items(): if not ((type(updates) is dict) and (len(updates) >= 1)): raise JasminSyntaxError('updates should be a a key value array') telnet.sendline("%s %s" % (k, v)) matched_index = telnet.expect([ r'.*(Unknown SMPPClientConfig key:.*)' + INTERACTIVE_PROMPT, r'.*(Error:.*)' + STANDARD_PROMPT, r'.*' + INTERACTIVE_PROMPT, r'.+(.*)(' + INTERACTIVE_PROMPT + '|' + STANDARD_PROMPT + ')', ]) if matched_index != 2: raise JasminSyntaxError( detail=" ".join(telnet.match.group(1).split())) telnet.sendline('ok') ok_index = telnet.expect([ r'.*(Error:.*)' + STANDARD_PROMPT, r'(.*)' + INTERACTIVE_PROMPT, r'.*' + STANDARD_PROMPT, ]) if ok_index == 0: raise JasminSyntaxError( detail=" ".join(telnet.match.group(1).split())) telnet.sendline('persist\n') #Not sure why this needs to be repeated, just as with user telnet.expect(r'.*' + STANDARD_PROMPT) return JsonResponse( {'connector': self.get_smppccm(telnet, cid, silent=False)}) @detail_route(methods=['put']) def start(self, request, cid): """Start SMPP Connector One parameter required, the connector identifier HTTP codes indicate result as follows - 200: successful start - 404: nonexistent connector - 400: other error - this includes failure to start because it is started. """ return self.simple_smppccm_action(request.telnet, '1', cid) @detail_route(methods=['put']) def stop(self, request, cid): """Start SMPP Connector One parameter required, the connector identifier HTTP codes indicate result as follows - 200: successful start - 404: nonexistent connector - 400: other error - this includes failure to stop because it is stopped. 
""" return self.simple_smppccm_action(request.telnet, '0', cid)
# -*- coding: utf-8 -*- import mock from django import test from django import http from django.conf import settings from django.utils import timezone from cradmin_legacy import cradmin_testhelpers from model_bakery import baker from devilry.devilry_account import models as account_models from devilry.apps.core import models as core_models from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql from devilry.devilry_dbcache.models import AssignmentGroupCachedData from devilry.devilry_deadlinemanagement.views import manage_deadline_view from devilry.devilry_group import devilry_group_baker_factories as group_baker from devilry.devilry_group import models as group_models from devilry.utils import datetimeutils from devilry.utils.datetimeutils import isoformat_withseconds class AdminTestCaseMixin(test.TestCase, cradmin_testhelpers.TestCaseMixin): viewclass = manage_deadline_view.ManageDeadlineAllGroupsView handle_deadline = 'new-attempt' def setUp(self): AssignmentGroupDbCacheCustomSql().initialize() def _get_mock_instance(self, assignment): mock_instance = mock.MagicMock() mock_instance.get_devilryrole_type.return_value = 'admin' mock_instance.assignment = assignment return mock_instance def _get_mock_app(self, user=None): mock_app = mock.MagicMock() mock_app.get_devilryrole.return_value = 'admin' mock_app.get_accessible_group_queryset.return_value = core_models.AssignmentGroup.objects\ .filter_user_is_admin(user=user) return mock_app def _create_department_admin(self, period): testuser = baker.make(settings.AUTH_USER_MODEL) subject = period.parentnode permissiongroup = baker.make( 'devilry_account.SubjectPermissionGroup', permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_DEPARTMENTADMIN, subject=subject).permissiongroup baker.make('devilry_account.PermissionGroupUser', user=testuser, permissiongroup=permissiongroup) return testuser def _create_subject_admin(self, period): testuser = baker.make(settings.AUTH_USER_MODEL) subject = period.parentnode permissiongroup = baker.make( 'devilry_account.SubjectPermissionGroup', permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_SUBJECTADMIN, subject=subject ).permissiongroup baker.make('devilry_account.PermissionGroupUser', user=testuser, permissiongroup=permissiongroup) return testuser def _create_period_admin(self, period): testuser = baker.make(settings.AUTH_USER_MODEL) permissiongroup = baker.make( 'devilry_account.PeriodPermissionGroup', permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_PERIODADMIN, period=period ).permissiongroup baker.make('devilry_account.PermissionGroupUser', user=testuser, permissiongroup=permissiongroup) return testuser def _get_admin_user(self, period): return self._create_period_admin(period) class TestManageDeadlineNewAttemptAllGroupsView(AdminTestCaseMixin): viewclass = manage_deadline_view.ManageDeadlineAllGroupsView handle_deadline = 'new-attempt' def test_all_groups_added_to_form_hidden(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup2) testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup3) testgroup4 = baker.make('core.AssignmentGroup', 
parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup4) testuser = self._get_admin_user(testassignment.parentnode) mockresponse = self.mock_http200_getrequest_htmls( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline } ) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_2').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_3').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup2.id), mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup3.id), mockresponse.selector.one('#id_selected_items_2').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup4.id), mockresponse.selector.one('#id_selected_items_3').__str__().decode('utf-8')) def test_all_only_one_group_added_to_form_hidden(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) mockresponse = self.mock_http200_getrequest_htmls( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline } ) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) def test_post_only_groups_added_as_form_hidden_input(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) self.mock_http302_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been 
given a new attempt.', 'selected_items': [testgroup1.id] } } ) feedbacksets = group_models.FeedbackSet.objects.all() self.assertEqual(3, feedbacksets.count()) group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id) group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id) new_deadline = new_deadline.replace(second=59) # Deadline is cleaned to seconds as 59. self.assertEqual(new_deadline, group1.cached_data.last_feedbackset.deadline_datetime) self.assertNotEqual(new_deadline, group2.cached_data.last_feedbackset.deadline_datetime) def test_post_groups_unpublished_raises_error(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) with self.assertRaises(http.Http404): self.mock_http302_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertEqual(2, group_models.FeedbackSet.objects.count()) class TestSubjectAdminNewAttemptAllGroupsView(TestManageDeadlineNewAttemptAllGroupsView): def _get_admin_user(self, period): return self._create_subject_admin(period) class TestDepartmentAdminNewAttemptAllGroupsView(TestManageDeadlineNewAttemptAllGroupsView): def _get_admin_user(self, period): return self._create_department_admin(period) class TestManageDeadlineMoveDeadlineAllGroupsView(AdminTestCaseMixin): viewclass = manage_deadline_view.ManageDeadlineAllGroupsView handle_deadline = 'move-deadline' def test_all_groups_added_to_form_hidden(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup3) testgroup4 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup4) testuser = self._get_admin_user(testassignment.parentnode) mockresponse = self.mock_http200_getrequest_htmls( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline } ) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('type="hidden"', 
mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_2').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_3').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup2.id), mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup3.id), mockresponse.selector.one('#id_selected_items_2').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup4.id), mockresponse.selector.one('#id_selected_items_3').__str__().decode('utf-8')) def test_all_only_one_group_added_to_form_hidden(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) mockresponse = self.mock_http200_getrequest_htmls( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline } ) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) def test_post_only_groups_added_as_form_hidden_input(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup1) group_baker.feedbackset_first_attempt_published(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) self.mock_http302_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id] } } ) feedbacksets = group_models.FeedbackSet.objects.all() self.assertEqual(2, feedbacksets.count()) group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id) group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id) self.assertEqual(group1.cached_data.last_feedbackset, group1.cached_data.first_feedbackset) new_deadline = new_deadline.replace(second=59) # Deadline is cleaned to seconds as 59. 
self.assertEqual(new_deadline, group1.cached_data.last_feedbackset.deadline_datetime) self.assertNotEqual(new_deadline, group2.cached_data.last_feedbackset.deadline_datetime) def test_post_groups_published_raises_error(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) with self.assertRaises(http.Http404): self.mock_http302_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertEqual(2, group_models.FeedbackSet.objects.count()) class TestSubjectAdminMoveDeadlinAllGroupsView(TestManageDeadlineMoveDeadlineAllGroupsView): def _get_admin_user(self, period): return self._create_subject_admin(period) class TestDepartmentAdminMoveDeadlineAllGroupsView(TestManageDeadlineMoveDeadlineAllGroupsView): def _get_admin_user(self, period): return self._create_department_admin(period) class TestManageDeadlineNewAttemptFromPreviousView(AdminTestCaseMixin): """ Tests posting data from another view, and the actual posting in this view. """ viewclass = manage_deadline_view.ManageDeadlineFromPreviousView handle_deadline = 'new-attempt' def test_post_from_previous_view_selected_groups_are_hidden(self): # By adding the post_type_received_data to the key, we are simulating that the # post comes from a different view. 
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) mockresponse = self.mock_http200_postrequest_htmls( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'post_type_received_data': '', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup2.id), mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) def test_post_new_attempt(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) self.mock_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': 'new-attempt' }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertEqual(4, group_models.FeedbackSet.objects.count()) self.assertEqual(2, group_models.GroupComment.objects.count()) group_comments = group_models.GroupComment.objects.all() last_feedbackset_group1 = AssignmentGroupCachedData.objects.get(group_id=testgroup1.id).last_feedbackset last_feedbackset_group2 = AssignmentGroupCachedData.objects.get(group_id=testgroup2.id).last_feedbackset new_deadline = new_deadline.replace(second=59) # Deadline is cleaned to seconds as 59. 
self.assertEqual(last_feedbackset_group1.deadline_datetime, new_deadline) self.assertEqual(last_feedbackset_group2.deadline_datetime, new_deadline) self.assertEqual(last_feedbackset_group1.last_updated_by, testuser) self.assertEqual(last_feedbackset_group2.last_updated_by, testuser) self.assertEqual('You have been given a new attempt.', group_comments[0].text) self.assertEqual('You have been given a new attempt.', group_comments[1].text) def test_post_groups_published_raises_error(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) with self.assertRaises(http.Http404): self.mock_http302_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertEqual(2, group_models.FeedbackSet.objects.count()) class TestSubjectAdminNewAttemptFromPreviousView(TestManageDeadlineNewAttemptFromPreviousView): def _get_admin_user(self, period): return self._create_subject_admin(period) class TestDepartmentAdminNewAttemptFromPreviousView(TestManageDeadlineNewAttemptFromPreviousView): def _get_admin_user(self, period): return self._create_department_admin(period) class TestManageDeadlineMoveDeadlineFromPreviousView(AdminTestCaseMixin): viewclass = manage_deadline_view.ManageDeadlineFromPreviousView handle_deadline = 'move-deadline' def test_post_from_previous_view_selected_groups_are_hidden(self): # By adding the post_type_received_data to the key, we are simulating that the # post comes from a different view. 
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) mockresponse = self.mock_http200_postrequest_htmls( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'post_type_received_data': '', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8')) self.assertIn('value="{}"'.format(testgroup2.id), mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8')) def test_post_new_attempt(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) self.mock_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': 'new-attempt' }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertEqual(4, group_models.FeedbackSet.objects.count()) self.assertEqual(2, group_models.GroupComment.objects.count()) group_comments = group_models.GroupComment.objects.all() last_feedbackset_group1 = AssignmentGroupCachedData.objects.get(group_id=testgroup1.id).last_feedbackset last_feedbackset_group2 = AssignmentGroupCachedData.objects.get(group_id=testgroup2.id).last_feedbackset new_deadline = new_deadline.replace(second=59) # Deadline is cleaned to seconds as 59. 
self.assertEqual(last_feedbackset_group1.deadline_datetime, new_deadline) self.assertEqual(last_feedbackset_group2.deadline_datetime, new_deadline) self.assertEqual(last_feedbackset_group1.last_updated_by, testuser) self.assertEqual(last_feedbackset_group2.last_updated_by, testuser) self.assertEqual('You have been given a new attempt.', group_comments[0].text) self.assertEqual('You have been given a new attempt.', group_comments[1].text) def test_post_groups_published_raises_error(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment) testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup1) group_baker.feedbackset_first_attempt_unpublished(group=testgroup2) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) with self.assertRaises(http.Http404): self.mock_http302_postrequest( cradmin_role=testassignment, cradmin_instance=self._get_mock_instance(testassignment), cradmin_app=self._get_mock_app(user=testuser), requestuser=testuser, viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline }, requestkwargs={ 'data': { 'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)), 'comment_text': 'You have been given a new attempt.', 'selected_items': [testgroup1.id, testgroup2.id] } } ) self.assertEqual(2, group_models.FeedbackSet.objects.count()) class TestSubjectAdminMoveDeadlineFromPreviousView(TestManageDeadlineMoveDeadlineFromPreviousView): def _get_admin_user(self, period): return self._create_subject_admin(period) class TestDepartmentAdminMoveDeadlineFromPreviousView(TestManageDeadlineMoveDeadlineFromPreviousView): def _get_admin_user(self, period): return self._create_department_admin(period) class TestManageDeadlineMoveDeadlineSingleGroup(AdminTestCaseMixin): viewclass = manage_deadline_view.ManageDeadlineSingleGroupView handle_deadline = 'move-deadline' def test_period_admin_move_deadline_last_attempt_is_graded(self): testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start') testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment) group_baker.feedbackset_first_attempt_published(group=testgroup) testuser = self._get_admin_user(testassignment.parentnode) new_deadline = timezone.now() + timezone.timedelta(days=3) new_deadline = new_deadline.replace(microsecond=0) with self.assertRaises(http.Http404): self.mock_http200_getrequest_htmls( cradmin_role=testgroup, cradmin_instance=self._get_mock_instance(testassignment), requestuser=testuser, cradmin_app=self._get_mock_app(testuser), viewkwargs={ 'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline), 'handle_deadline': self.handle_deadline, 'group_id': testgroup.id } ) class TestSubjectAdminManageDeadlineMoveDeadlineSingleGroup(TestManageDeadlineMoveDeadlineSingleGroup): def _get_admin_user(self, period): return self._create_subject_admin(period) class TestDepartmentAdminManageDeadlineMoveDeadlineSingleGroup(TestManageDeadlineMoveDeadlineSingleGroup): def _get_admin_user(self, period): return self._create_department_admin(period)
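The tests above rebuild the same POST payload in several places. A small helper sketch (my own suggestion, not part of the test module) that factors it out, using the same field names the manage-deadline views expect:

from django.utils import timezone

from devilry.utils.datetimeutils import isoformat_withseconds


def build_deadline_post_data(groups, days_ahead=3,
                             comment_text='You have been given a new attempt.'):
    """Return (new_deadline, form_data) for posting to the manage-deadline views."""
    new_deadline = timezone.now() + timezone.timedelta(days=days_ahead)
    new_deadline = new_deadline.replace(microsecond=0)
    return new_deadline, {
        'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
        'comment_text': comment_text,
        'selected_items': [group.id for group in groups],
    }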
# Natural Language Toolkit: Clusterer Utilities
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals

import copy
from sys import stdout
from math import sqrt

try:
    import numpy
except ImportError:
    pass

from nltk.cluster.api import ClusterI
from nltk.compat import python_2_unicode_compatible


class VectorSpaceClusterer(ClusterI):
    """
    Abstract clusterer which takes tokens and maps them into a vector space.
    Optionally performs singular value decomposition to reduce the
    dimensionality.
    """

    def __init__(self, normalise=False, svd_dimensions=None):
        """
        :param normalise:       should vectors be normalised to length 1
        :type normalise:        boolean
        :param svd_dimensions:  number of dimensions to use in reducing vector
                                dimensionality with SVD
        :type svd_dimensions:   int
        """
        self._Tt = None
        self._should_normalise = normalise
        self._svd_dimensions = svd_dimensions

    def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = list(map(self._normalise, vectors))

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:, :self._svd_dimensions]
            Dt = vt[:self._svd_dimensions, :]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)

        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            return [self.classify(vector) for vector in vectors]

    def cluster_vectorspace(self, vectors, trace):
        """
        Finds the clusters using the given set of vectors.
        """
        raise NotImplementedError()

    def classify(self, vector):
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        cluster = self.classify_vectorspace(vector)
        return self.cluster_name(cluster)

    def classify_vectorspace(self, vector):
        """
        Returns the index of the appropriate cluster for the vector.
        """
        raise NotImplementedError()

    def likelihood(self, vector, label):
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        return self.likelihood_vectorspace(vector, label)

    def likelihood_vectorspace(self, vector, cluster):
        """
        Returns the likelihood of the vector belonging to the cluster.
        """
        predicted = self.classify_vectorspace(vector)
        return (1.0 if cluster == predicted else 0.0)

    def vector(self, vector):
        """
        Returns the vector after normalisation and dimensionality reduction
        """
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        return vector

    def _normalise(self, vector):
        """
        Normalises the vector to unit length.
        """
        return vector / sqrt(numpy.dot(vector, vector))


def euclidean_distance(u, v):
    """
    Returns the euclidean distance between vectors u and v. This is equivalent
    to the length of the vector (u - v).
    """
    diff = u - v
    return sqrt(numpy.dot(diff, diff))


def cosine_distance(u, v):
    """
    Returns 1 minus the cosine of the angle between vectors v and u. This is
    equal to 1 - (u.v / |u||v|).
""" return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v)))) class _DendrogramNode(object): """ Tree node of a dendrogram. """ def __init__(self, value, *children): self._value = value self._children = children def leaves(self, values=True): if self._children: leaves = [] for child in self._children: leaves.extend(child.leaves(values)) return leaves elif values: return [self._value] else: return [self] def groups(self, n): queue = [(self._value, self)] while len(queue) < n: priority, node = queue.pop() if not node._children: queue.push((priority, node)) break for child in node._children: if child._children: queue.append((child._value, child)) else: queue.append((0, child)) # makes the earliest merges at the start, latest at the end queue.sort() groups = [] for priority, node in queue: groups.append(node.leaves()) return groups @python_2_unicode_compatible class Dendrogram(object): """ Represents a dendrogram, a tree with a specified branching order. This must be initialised with the leaf items, then iteratively call merge for each branch. This class constructs a tree representing the order of calls to the merge function. """ def __init__(self, items=[]): """ :param items: the items at the leaves of the dendrogram :type items: sequence of (any) """ self._items = [_DendrogramNode(item) for item in items] self._original_items = copy.copy(self._items) self._merge = 1 def merge(self, *indices): """ Merges nodes at given indices in the dendrogram. The nodes will be combined which then replaces the first node specified. All other nodes involved in the merge will be removed. :param indices: indices of the items to merge (at least two) :type indices: seq of int """ assert len(indices) >= 2 node = _DendrogramNode(self._merge, *[self._items[i] for i in indices]) self._merge += 1 self._items[indices[0]] = node for i in indices[1:]: del self._items[i] def groups(self, n): """ Finds the n-groups of items (leaves) reachable from a cut at depth n. :param n: number of groups :type n: int """ if len(self._items) > 1: root = _DendrogramNode(self._merge, *self._items) else: root = self._items[0] return root.groups(n) def show(self, leaf_labels=[]): """ Print the dendrogram in ASCII art to standard out. 
:param leaf_labels: an optional list of strings to use for labeling the leaves :type leaf_labels: list """ # ASCII rendering characters JOIN, HLINK, VLINK = '+', '-', '|' # find the root (or create one) if len(self._items) > 1: root = _DendrogramNode(self._merge, *self._items) else: root = self._items[0] leaves = self._original_items if leaf_labels: last_row = leaf_labels else: last_row = ["%s" % leaf._value for leaf in leaves] # find the bottom row and the best cell width width = max(map(len, last_row)) + 1 lhalf = width / 2 rhalf = width - lhalf - 1 # display functions def format(centre, left=' ', right=' '): return '%s%s%s' % (lhalf*left, centre, right*rhalf) def display(str): stdout.write(str) # for each merge, top down queue = [(root._value, root)] verticals = [ format(' ') for leaf in leaves ] while queue: priority, node = queue.pop() child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children)) indices = list(map(leaves.index, child_left_leaf)) if child_left_leaf: min_idx = min(indices) max_idx = max(indices) for i in range(len(leaves)): if leaves[i] in child_left_leaf: if i == min_idx: display(format(JOIN, ' ', HLINK)) elif i == max_idx: display(format(JOIN, HLINK, ' ')) else: display(format(JOIN, HLINK, HLINK)) verticals[i] = format(VLINK) elif min_idx <= i <= max_idx: display(format(HLINK, HLINK, HLINK)) else: display(verticals[i]) display('\n') for child in node._children: if child._children: queue.append((child._value, child)) queue.sort() for vertical in verticals: display(vertical) display('\n') # finally, display the last line display(''.join(item.center(width) for item in last_row)) display('\n') def __repr__(self): if len(self._items) > 1: root = _DendrogramNode(self._merge, *self._items) else: root = self._items[0] leaves = root.leaves(False) return '<Dendrogram with %d leaves>' % len(leaves)
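# ---------------------------------------------------------------------------
# Usage sketch (not part of the NLTK sources): Dendrogram records the order of
# merge() calls; groups(n) then cuts the implied tree into n groups. This
# module targets Python 2, so the sketch sticks to that era's idioms.
# ---------------------------------------------------------------------------
from nltk.cluster.util import Dendrogram

d = Dendrogram(['a', 'b', 'c', 'd'])
d.merge(0, 1)        # items are now [<a+b>, 'c', 'd']
d.merge(1, 2)        # items are now [<a+b>, <c+d>]
print(d.groups(2))   # [['a', 'b'], ['c', 'd']]
d.show()             # ASCII-art rendering of the merge order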
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v9.resources.types import feed_item from google.ads.googleads.v9.services.types import feed_item_service from .base import FeedItemServiceTransport, DEFAULT_CLIENT_INFO class FeedItemServiceGrpcTransport(FeedItemServiceTransport): """gRPC backend transport for FeedItemService. Service to manage feed items. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__( self, *, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = google.auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) def close(self): self.grpc_channel.close() @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. 
""" return self._grpc_channel @property def get_feed_item( self, ) -> Callable[[feed_item_service.GetFeedItemRequest], feed_item.FeedItem]: r"""Return a callable for the get feed item method over gRPC. Returns the requested feed item in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetFeedItemRequest], ~.FeedItem]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_feed_item" not in self._stubs: self._stubs["get_feed_item"] = self.grpc_channel.unary_unary( "/google.ads.googleads.v9.services.FeedItemService/GetFeedItem", request_serializer=feed_item_service.GetFeedItemRequest.serialize, response_deserializer=feed_item.FeedItem.deserialize, ) return self._stubs["get_feed_item"] @property def mutate_feed_items( self, ) -> Callable[ [feed_item_service.MutateFeedItemsRequest], feed_item_service.MutateFeedItemsResponse, ]: r"""Return a callable for the mutate feed items method over gRPC. Creates, updates, or removes feed items. Operation statuses are returned. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `CollectionSizeError <>`__ `CriterionError <>`__ `DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__ `FeedItemError <>`__ `FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__ `IdError <>`__ `InternalError <>`__ `ListOperationError <>`__ `MutateError <>`__ `NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__ `QuotaError <>`__ `RangeError <>`__ `RequestError <>`__ `SizeLimitError <>`__ `StringFormatError <>`__ `StringLengthError <>`__ `UrlFieldError <>`__ Returns: Callable[[~.MutateFeedItemsRequest], ~.MutateFeedItemsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_feed_items" not in self._stubs: self._stubs["mutate_feed_items"] = self.grpc_channel.unary_unary( "/google.ads.googleads.v9.services.FeedItemService/MutateFeedItems", request_serializer=feed_item_service.MutateFeedItemsRequest.serialize, response_deserializer=feed_item_service.MutateFeedItemsResponse.deserialize, ) return self._stubs["mutate_feed_items"] __all__ = ("FeedItemServiceGrpcTransport",)
from django.shortcuts import *
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.models import User
from django.contrib import messages
from django.conf import settings

from .models import Question, Answer
from .forms import QuestionForm, AnswerForm


def questions(request, page=None, template_name='questions/questions.html'):
    '''Displays the list of questions.'''
    page = page or 1
    order_dict = {
        'newest': '-when',
        'oldest': 'when'
    }
    # TODO: This can't be hardcoded, has to be an option on the
    # sorting/filtering toolbar.
    results = Question.objects.all().order_by(order_dict.get(
        request.GET.get('order', 'newest'))).select_related(
        'author').prefetch_related('category', 'reporters', 'hearters')

    paginator = Paginator(results, settings.QUESTIONS_PER_PAGE)

    # TODO: IMPORTANT: The _count bits below *need* to be cached.
    context = {
        'questions_list': paginator.page(page),
        'question_count': Question.objects.count(),
        'answer_count': Answer.objects.count(),
        'user_count': User.objects.count(),
        'order': request.GET.get('order', 'newest'),
        'page': page,
    }
    return render_to_response(template_name, context, RequestContext(request))


def question(request, pk, slug, template_name='questions/question.html'):
    '''Displays a specific question. Also allows user to answer a question.'''
    answer_form = None
    question = get_object_or_404(Question, pk=pk, slug=slug)

    if request.method == 'POST':
        if not request.user.is_authenticated():
            return redirect(reverse('login'))

        answer_form = AnswerForm(request.POST)

        if question.author == request.user:
            context = {
                'error_message': '''It looks like you tried to answer your own
                question. That isn't allowed.'''
            }
            return render(request, '401.html', status=401, dictionary=context)

        if answer_form.is_valid():
            answer = answer_form.save(commit=False)
            answer.question = Question.objects.get(pk=pk, slug=slug)
            answer.author = request.user

            if Answer.objects.filter(question__pk=pk, author=request.user).exists():
                context = {
                    'error_message': 'You\'ve already answered this question and can\'t answer it again.'
                }
                return render(request, '401.html', status=401, dictionary=context)

            canswer_strip = (''.join(c for c in question.correct_answer if c.isalnum())).lower()
            answer_strip = (''.join(c for c in answer.answer if c.isalnum())).lower()
            if answer_strip == canswer_strip:
                answer.author.get_profile().add_monthly_score(settings.POINTS_PER_ANSWER)
                answer.correct = True
                messages.success(request, 'Woot! Your question was reviewed automatically and marked correct. +10 points')
            answer.save()
    else:
        answer_form = AnswerForm()

    context = {
        'question': question,
        'answer_form': answer_form,
        'hide_answer_link': True,
    }

    if request.user.is_authenticated():
        context['answered'] = question.is_answered(request.user)
        context['answer'] = Answer.objects.filter(question__pk=pk, author=request.user)
        if context['answer'].exists():
            context['answer'] = context['answer'][0]
        else:
            del context['answer']

    return render_to_response(template_name, context, RequestContext(request))


@csrf_protect
def delete_question(request, pk, slug):
    '''
    Delete a question. Only for internal use, the API has another method.
    It's assumed that the user has already confirmed this action.
''' if request.method != 'POST': # We should probably show an error message or something here, but # this really isn't a view that might be accessed manually by a user. return redirect(reverse('questions')) question = get_object_or_404(Question, pk=pk, slug=slug) user = request.user if user.is_authenticated() and user.get_profile().can_edit(question): question.delete() messages.success(request, 'The question was deleted successfully.') else: context = { 'error_message': '''It looks like you tried to delete someone else\'s question (which you obviously can\'t do.) If you\'re sure this is your question, try clearing your browser\'s cookies and logging in again.''' } return render(request, '401.html', status=401, dictionary=context) return redirect(request.GET.get('next', reverse('questions'))) @csrf_protect def report_question(request, pk, slug): ''' Report a question. Only for internal use, the API has another method. It's assumed that the user has already confirmed this action. If the user has already reported this question, then it is "un-reported" ''' if request.method != 'POST': # We should probably show an error message or something here, but # this really isn't a view that might be accessed manually by a user. return redirect(reverse('questions')) question = get_object_or_404(Question, pk=pk, slug=slug) user = request.user if user.is_authenticated() and question.author != user: question.toggle_report(user) messages.info(request, 'The question was reported successfully.') else: return render(request, 'questions/report_error.html', status=401) return redirect(request.GET.get('next', reverse('questions'))) @login_required def ask(request, template_name='questions/ask.html'): ''' Displays a form to ask a question and creates a new question if validated. ''' form = None if request.method == 'POST': form = QuestionForm(request.POST) if form.is_valid(): question = form.save(commit=False) question.author = request.user question.save() return redirect(question.get_absolute_url()) else: form = QuestionForm() return render_to_response(template_name, {'form': form}, RequestContext(request)) @login_required def reviews(request, template_name='questions/reviews.html'): if request.method == 'POST': answer = Answer.objects.get(pk=request.POST.get('answer-pk')) if not request.user.get_profile().can_edit(answer.question): context = { 'error_message': '''It looks like you tried to review an answer on someone else's question.''' } return render(request, '401.html', status=401, dictionary=context) if not answer.correct is None: context = { 'error_message': '''The answer you tried to review has already been reviewed.''' } return render(request, '401.html', status=401, dictionary=context) correct = None if 'mark-correct' in request.POST: correct = True answer.author.get_profile().add_monthly_score(settings.POINTS_PER_ANSWER) elif 'mark-incorrect' in request.POST: correct = False # If correct is None, then the answer will be assumed as 'not reviewed' answer.correct = correct answer.save() # Ugly and counterintuitive, but possibly the best (+fastest) way to do it # Basically: "Give me a list of all answers for questions whose author is # the current user and which are not reviewed. Now, give me a list of all # distinct questions whose answers are in the first list." # We're interested in the latter. 
    if request.user.is_staff:
        answers_list = Answer.objects.filter(correct=None)
    else:
        answers_list = Answer.objects.filter(question__author=request.user, correct=None)
    questions_list = Question.objects.filter(answers__in=answers_list).distinct()

    context = {
        'questions_list': questions_list,
    }
    return render_to_response(template_name, context, RequestContext(request))


def about(request):
    return render_to_response('meta_about.html', {}, RequestContext(request))
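# ---------------------------------------------------------------------------
# Hedged sketch of a urls.py wiring for the views above. The URL names
# ('questions', 'login', etc.) are inferred from the reverse() calls in the
# views; the real project's patterns may differ. Uses the era-appropriate
# patterns()/string-view API matching django.core.urlresolvers above.
# ---------------------------------------------------------------------------
from django.conf.urls import patterns, url

urlpatterns = patterns('questions.views',
    url(r'^$', 'questions', name='questions'),
    url(r'^page/(?P<page>\d+)/$', 'questions', name='questions_page'),
    url(r'^ask/$', 'ask', name='ask'),
    url(r'^reviews/$', 'reviews', name='reviews'),
    url(r'^about/$', 'about', name='about'),
    url(r'^(?P<pk>\d+)/(?P<slug>[\w-]+)/$', 'question', name='question'),
    url(r'^(?P<pk>\d+)/(?P<slug>[\w-]+)/delete/$', 'delete_question', name='delete_question'),
    url(r'^(?P<pk>\d+)/(?P<slug>[\w-]+)/report/$', 'report_question', name='report_question'),
)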
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Original implementation by Rene de Jong. Updated by Sascha Bischoff.

# pylint: disable=E1101
import logging
import os
import re
import shutil
import socket
import subprocess
import sys
import tarfile
import time

from pexpect import EOF, TIMEOUT, pxssh

from wlauto import settings, Parameter
from wlauto.core.resource import NO_ONE
from wlauto.common.resources import Executable
from wlauto.core import signal as sig
from wlauto.exceptions import DeviceError
from wlauto.utils import ssh, types


class BaseGem5Device(object):
    """
    Base implementation for a gem5-based device

    This class is used as the base class for OS-specific devices such as the
    Gem5LinuxDevice and the Gem5AndroidDevice. The majority of the
    gem5-specific functionality is included here.

    Note: When inheriting from this class, make sure to inherit from this
    class prior to inheriting from the OS-specific class, i.e. LinuxDevice,
    to ensure that the methods are correctly overridden.
    """
    # gem5 can be very slow. Hence, we use some very long timeouts!
    delay = 3600
    long_delay = 3 * delay
    ready_timeout = long_delay
    default_timeout = delay

    platform = None
    path_module = 'posixpath'

    parameters = [
        Parameter('gem5_binary', kind=str, default='./build/ARM/gem5.fast',
                  mandatory=False,
                  description="Command used to execute gem5. "
                  "Adjust according to needs."),
        Parameter('gem5_args', kind=types.arguments, mandatory=True,
                  description="Command line passed to the gem5 simulation. This"
                  " command line is used to set up the simulated system, and "
                  "should be the same as used for a standard gem5 simulation "
                  "without workload automation. Note that this is simulation "
                  "script specific and will hence need to be tailored to each "
                  "particular use case."),
        Parameter('gem5_vio_args', kind=types.arguments, mandatory=True,
                  constraint=lambda x: "{}" in str(x),
                  description="gem5 VirtIO command line used to enable the "
                  "VirtIO device in the simulated system. At the very least, "
                  "the root parameter of the VirtIO9PDiod device must be "
                  "exposed on the command line. Please set this root mount to "
                  "{}, as it will be replaced with the directory used by "
                  "Workload Automation at runtime."),
        Parameter('temp_dir', kind=str, default='/tmp',
                  description="Temporary directory used to pass files into the "
                  "gem5 simulation. Workload Automation will automatically "
                  "create a directory in this folder, and will remove it again "
                  "once the simulation completes."),
        Parameter('checkpoint', kind=bool, default=False, mandatory=False,
                  description="This parameter "
                  "tells Workload Automation to create a checkpoint of the "
                  "simulated system once the guest system has finished booting."
                  " This checkpoint can then be used at a later stage by other "
                  "WA runs to avoid booting the guest system a second time. 
Set" " to True to take a checkpoint of the simulated system post " "boot."), Parameter('run_delay', kind=int, default=0, mandatory=False, constraint=lambda x: x >= 0, description="This sets the time that the " "system should sleep in the simulated system prior to " "running and workloads or taking checkpoints. This allows " "the system to quieten down prior to running the workloads. " "When this is combined with the checkpoint_post_boot" " option, it allows the checkpoint to be created post-sleep," " and therefore the set of workloads resuming from this " "checkpoint will not be required to sleep.") ] @property def is_rooted(self): # pylint: disable=R0201 # gem5 is always rooted return True # pylint: disable=E0203 def __init__(self): self.logger = logging.getLogger('gem5Device') # The gem5 subprocess self.gem5 = None self.gem5_port = -1 self.gem5outdir = os.path.join(settings.output_directory, "gem5") self.m5_path = 'm5' self.stdout_file = None self.stderr_file = None self.stderr_filename = None self.sckt = None # Find the first one that does not exist. Ensures that we do not re-use # the directory used by someone else. for i in xrange(sys.maxint): directory = os.path.join(self.temp_dir, "wa_{}".format(i)) try: os.stat(directory) continue except OSError: break self.temp_dir = directory self.logger.debug("Using {} as the temporary directory.".format(self.temp_dir)) # Start the gem5 simulation when WA starts a run using a signal. sig.connect(self.init_gem5, sig.RUN_START) def validate(self): # Assemble the virtio args self.gem5_vio_args = str(self.gem5_vio_args).format(self.temp_dir) # pylint: disable=W0201 self.logger.debug("gem5 VirtIO command: {}".format(self.gem5_vio_args)) def init_gem5(self, _): """ Start gem5, find out the telnet port and connect to the simulation. We first create the temporary directory used by VirtIO to pass files into the simulation, as well as the gem5 output directory.We then create files for the standard output and error for the gem5 process. The gem5 process then is started. """ self.logger.info("Creating temporary directory: {}".format(self.temp_dir)) os.mkdir(self.temp_dir) os.mkdir(self.gem5outdir) # We need to redirect the standard output and standard error for the # gem5 process to a file so that we can debug when things go wrong. f = os.path.join(self.gem5outdir, 'stdout') self.stdout_file = open(f, 'w') f = os.path.join(self.gem5outdir, 'stderr') self.stderr_file = open(f, 'w') # We need to keep this so we can check which port to use for the telnet # connection. self.stderr_filename = f self.start_gem5() def start_gem5(self): """ Starts the gem5 simulator, and parses the output to get the telnet port. """ self.logger.info("Starting the gem5 simulator") command_line = "{} --outdir={}/gem5 {} {}".format(self.gem5_binary, settings.output_directory, self.gem5_args, self.gem5_vio_args) self.logger.debug("gem5 command line: {}".format(command_line)) self.gem5 = subprocess.Popen(command_line.split(), stdout=self.stdout_file, stderr=self.stderr_file) while self.gem5_port == -1: # Check that gem5 is running! 
if self.gem5.poll(): raise DeviceError("The gem5 process has crashed with error code {}!".format(self.gem5.poll())) # Open the stderr file f = open(self.stderr_filename, 'r') for line in f: m = re.search(r"Listening\ for\ system\ connection\ on\ port\ (?P<port>\d+)", line) if m: port = int(m.group('port')) if port >= 3456 and port < 5900: self.gem5_port = port f.close() break else: time.sleep(1) f.close() def connect(self): # pylint: disable=R0912,W0201 """ Connect to the gem5 simulation and wait for Android to boot. Then, create checkpoints, and mount the VirtIO device. """ self.connect_gem5() self.wait_for_boot() if self.run_delay: self.logger.info("Sleeping for {} seconds in the guest".format(self.run_delay)) self.gem5_shell("sleep {}".format(self.run_delay)) if self.checkpoint: self.checkpoint_gem5() self.mount_virtio() self.logger.info("Creating the working directory in the simulated system") self.gem5_shell('mkdir -p {}'.format(self.working_directory)) self._is_ready = True # pylint: disable=W0201 def wait_for_boot(self): pass def connect_gem5(self): # pylint: disable=R0912 """ Connect to the telnet port of the gem5 simulation. We connect, and wait for the prompt to be found. We do not use a timeout for this, and wait for the prompt in a while loop as the gem5 simulation can take many hours to reach a prompt when booting the system. We also inject some newlines periodically to try and force gem5 to show a prompt. Once the prompt has been found, we replace it with a unique prompt to ensure that we are able to match it properly. We also disable the echo as this simplifies parsing the output when executing commands on the device. """ self.logger.info("Connecting to the gem5 simulation on port {}".format(self.gem5_port)) host = socket.gethostname() port = self.gem5_port # Connect to the gem5 telnet port. Use a short timeout here. attempts = 0 while attempts < 10: attempts += 1 try: self.sckt = ssh.TelnetConnection() self.sckt.login(host, 'None', port=port, auto_prompt_reset=False, login_timeout=10) break except pxssh.ExceptionPxssh: pass else: self.gem5.kill() raise DeviceError("Failed to connect to the gem5 telnet session.") self.logger.info("Connected! Waiting for prompt...") # We need to find the prompt. It might be different if we are resuming # from a checkpoint. Therefore, we test multiple options here. prompt_found = False while not prompt_found: try: self.login_to_device() except TIMEOUT: pass try: # Try and force a prompt to be shown self.sckt.send('\n') self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60) prompt_found = True except TIMEOUT: pass self.logger.info("Setting unique prompt...") self.sckt.set_unique_prompt() self.sckt.prompt() self.logger.info("Prompt found and replaced with a unique string") # We check that the prompt is what we think it should be. If not, we # need to update the regex we use to match. 
self.find_prompt() self.sckt.setecho(False) self.sync_gem5_shell() self.resize_shell() def get_properties(self, context): # pylint: disable=R0801 """ Get the property files from the device """ for propfile in self.property_files: try: normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.') outfile = os.path.join(context.host_working_directory, normname) if self.is_file(propfile): self.execute('cat {} > {}'.format(propfile, normname)) self.pull_file(normname, outfile) elif self.is_directory(propfile): self.get_directory(context, propfile) continue else: continue except DeviceError: # We pull these files "opportunistically", so if a pull fails # (e.g. we don't have permissions to read the file), just note # it quietly (not as an error/warning) and move on. self.logger.debug('Could not pull property file "{}"'.format(propfile)) return {} def get_directory(self, context, directory): """ Pull a directory from the device """ normname = directory.lstrip(self.path.sep).replace(self.path.sep, '.') outdir = os.path.join(context.host_working_directory, normname) temp_file = os.path.join(context.host_working_directory, "{}.tar".format(normname)) # Check that the folder exists self.gem5_shell("ls -la {}".format(directory)) # Compress the folder try: self.gem5_shell("{} tar -cvf {}.tar {}".format(self.busybox, normname, directory)) except DeviceError: self.logger.debug("Failed to run tar command on device! Not pulling {}".format(directory)) return self.pull_file(normname, temp_file) f = tarfile.open(temp_file, 'r') os.mkdir(outdir) f.extractall(outdir) os.remove(temp_file) def get_pids_of(self, process_name): """ Returns a list of PIDs of all processes with the specified name. """ result = self.gem5_shell('ps | {} grep {}'.format(self.busybox, process_name), check_exit_code=False).strip() if result and 'not found' not in result and len(result.split('\n')) > 2: return [int(x.split()[1]) for x in result.split('\n')] else: return [] def find_prompt(self): prompt = r'\[PEXPECT\][\\\$\#]+ ' synced = False while not synced: self.sckt.send('\n') i = self.sckt.expect([prompt, self.sckt.UNIQUE_PROMPT, r'[\$\#] '], timeout=self.delay) if i == 0: synced = True elif i == 1: prompt = self.sckt.UNIQUE_PROMPT synced = True else: prompt = re.sub(r'\$', r'\\\$', self.sckt.before.strip() + self.sckt.after.strip()) prompt = re.sub(r'\#', r'\\\#', prompt) prompt = re.sub(r'\[', r'\[', prompt) prompt = re.sub(r'\]', r'\]', prompt) self.sckt.PROMPT = prompt def close(self): if self._logcat_poller: self._logcat_poller.stop() def reset(self): self.logger.warn("Attempt to restart the gem5 device. This is not " "supported!") # pylint: disable=unused-argument def push_file(self, source, dest, **kwargs): """ Push a file to the gem5 device using VirtIO The file to push to the device is copied to the temporary directory on the host, before being copied within the simulation to the destination. Checks, in the form of 'ls' with error code checking, are performed to ensure that the file is copied to the destination. 
""" filename = os.path.basename(source) self.logger.debug("Pushing {} to device.".format(source)) self.logger.debug("temp_dir: {}".format(self.temp_dir)) self.logger.debug("dest: {}".format(dest)) self.logger.debug("filename: {}".format(filename)) # We need to copy the file to copy to the temporary directory self.move_to_temp_dir(source) # Back to the gem5 world self.gem5_shell("ls -al /mnt/obb/{}".format(filename)) if self.busybox: self.gem5_shell("{} cp /mnt/obb/{} {}".format(self.busybox, filename, dest)) else: self.gem5_shell("cat /mnt/obb/{} > {}".format(filename, dest)) self.gem5_shell("sync") self.gem5_shell("ls -al {}".format(dest)) self.gem5_shell("ls -al /mnt/obb/") self.logger.debug("Push complete.") # pylint: disable=unused-argument def pull_file(self, source, dest, **kwargs): """ Pull a file from the gem5 device using m5 writefile The file is copied to the local directory within the guest as the m5 writefile command assumes that the file is local. The file is then written out to the host system using writefile, prior to being moved to the destination on the host. """ filename = os.path.basename(source) self.logger.debug("pull_file {} {}".format(source, filename)) # We don't check the exit code here because it is non-zero if the source # and destination are the same. The ls below will cause an error if the # file was not where we expected it to be. self.gem5_shell("{} cp {} {}".format(self.busybox, source, filename), check_exit_code=False) self.gem5_shell("sync") self.gem5_shell("ls -la {}".format(filename)) self.logger.debug('Finished the copy in the simulator') self.gem5_util("writefile {}".format(filename)) if 'cpu' not in filename: while not os.path.exists(os.path.join(self.gem5outdir, filename)): time.sleep(1) # Perform the local move shutil.move(os.path.join(self.gem5outdir, filename), dest) self.logger.debug("Pull complete.") # pylint: disable=unused-argument def delete_file(self, filepath, **kwargs): """ Delete a file on the device """ self._check_ready() self.gem5_shell("rm '{}'".format(filepath)) def file_exists(self, filepath): """ Check if a file exists """ self._check_ready() output = self.gem5_shell('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath)) try: if int(output): return True except ValueError: # If we cannot process the output, assume that there is no file pass return False def disconnect(self): """ Close and disconnect from the gem5 simulation. Additionally, we remove the temporary directory used to pass files into the simulation. """ self.logger.info("Gracefully terminating the gem5 simulation.") try: self.gem5_util("exit") self.gem5.wait() except EOF: pass self.logger.info("Removing the temporary directory") try: shutil.rmtree(self.temp_dir) except OSError: self.logger.warn("Failed to remove the temporary directory!") # gem5 might be slow. Hence, we need to make the ping timeout very long. def ping(self): self.logger.debug("Pinging gem5 to see if it is still alive") self.gem5_shell('ls /', timeout=self.longdelay) # Additional Android-specific methods. def forward_port(self, _): # pylint: disable=R0201 raise DeviceError('we do not need forwarding') # gem5 should dump out a framebuffer. We can use this if it exists. Failing # that, fall back to the parent class implementation. def capture_screen(self, filepath): file_list = os.listdir(self.gem5outdir) screen_caps = [] for f in file_list: if '.bmp' in f: screen_caps.append(f) if len(screen_caps) == 1: # Bail out if we do not have image, and resort to the slower, built # in method. 
try: import Image gem5_image = os.path.join(self.gem5outdir, screen_caps[0]) temp_image = os.path.join(self.gem5outdir, "file.png") im = Image.open(gem5_image) im.save(temp_image, "PNG") shutil.copy(temp_image, filepath) os.remove(temp_image) self.logger.debug("capture_screen: using gem5 screencap") return True except (shutil.Error, ImportError, IOError): pass return False def capture_view_hierachy(self, filepath): pass # TODO # pylint: disable=W0613 def execute(self, command, timeout=1000, check_exit_code=True, background=False, as_root=False, busybox=False, **kwargs): self._check_ready() if as_root and not self.is_rooted: raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command)) if busybox: if not self.is_rooted: raise DeviceError('Attempting to execute "{}" with busybox. '.format(command) + 'Busybox can only be deployed to rooted devices.') command = ' '.join([self.busybox, command]) if background: self.logger.debug("Attempt to execute in background. Not supported " "in gem5, hence ignored.") return self.gem5_shell(command, as_root=as_root) # Internal methods: do not use outside of the class. def _check_ready(self): """ Check if the device is ready. As this is gem5, we just assume that the device is ready once we have connected to the gem5 simulation, and updated the prompt. """ if not self._is_ready: raise DeviceError('Device not ready.') def gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True): # pylint: disable=R0912 """ Execute a command in the gem5 shell This wraps the telnet connection to gem5 and processes the raw output. This method waits for the shell to return, and then will try and separate the output from the command from the command itself. If this fails, warn, but continue with the potentially wrong output. The exit code is also checked by default, and non-zero exit codes will raise a DeviceError. """ conn = self.sckt if sync: self.sync_gem5_shell() self.logger.debug("gem5_shell command: {}".format(command)) # Send the actual command conn.send("{}\n".format(command)) # Wait for the response. We just sit here and wait for the prompt to # appear, as gem5 might take a long time to provide the output. This # avoids timeout issues. command_index = -1 while command_index == -1: if conn.prompt(): output = re.sub(r' \r([^\n])', r'\1', conn.before) output = re.sub(r'[\b]', r'', output) # Deal with line wrapping output = re.sub(r'[\r].+?<', r'', output) command_index = output.find(command) # If we have -1, then we cannot match the command, but the # prompt has returned. Hence, we have a bit of an issue. We # warn, and return the whole output. if command_index == -1: self.logger.warn("gem5_shell: Unable to match command in " "command output. Expect parsing errors!") command_index = 0 output = output[command_index + len(command):].strip() # It is possible that gem5 will echo the command. Therefore, we need to # remove that too! command_index = output.find(command) if command_index != -1: output = output[command_index + len(command):].strip() self.logger.debug("gem5_shell output: {}".format(output)) # We get a second prompt. Hence, we need to eat one to make sure that we # stay in sync. If we do not do this, we risk getting out of sync for # slower simulations. 
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay) if check_exit_code: exit_code_text = self.gem5_shell('echo $?', as_root=as_root, timeout=timeout, check_exit_code=False, sync=False) try: exit_code = int(exit_code_text.split()[0]) if exit_code: message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}' raise DeviceError(message.format(exit_code, command, output)) except (ValueError, IndexError): self.logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text)) return output def gem5_util(self, command): """ Execute a gem5 utility command using the m5 binary on the device """ self.gem5_shell('{} {}'.format(self.m5_path, command)) def sync_gem5_shell(self): """ Synchronise with the gem5 shell. Write some unique text to the gem5 device to allow us to synchronise with the shell output. We actually get two prompts so we need to match both of these. """ self.logger.debug("Sending Sync") self.sckt.send("echo \*\*sync\*\*\n") self.sckt.expect(r"\*\*sync\*\*", timeout=self.delay) self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay) self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay) def resize_shell(self): """ Resize the shell to avoid line wrapping issues. """ # Try and avoid line wrapping as much as possible. Don't check the error # codes from these command because some of them WILL fail. self.gem5_shell('stty columns 1024', check_exit_code=False) self.gem5_shell('{} stty columns 1024'.format(self.busybox), check_exit_code=False) self.gem5_shell('stty cols 1024', check_exit_code=False) self.gem5_shell('{} stty cols 1024'.format(self.busybox), check_exit_code=False) self.gem5_shell('reset', check_exit_code=False) def move_to_temp_dir(self, source): """ Move a file to the temporary directory on the host for copying to the gem5 device """ command = "cp {} {}".format(source, self.temp_dir) self.logger.debug("Local copy command: {}".format(command)) subprocess.call(command.split()) subprocess.call("sync".split()) def checkpoint_gem5(self, end_simulation=False): """ Checkpoint the gem5 simulation, storing all system state """ self.logger.info("Taking a post-boot checkpoint") self.gem5_util("checkpoint") if end_simulation: self.disconnect() def mount_virtio(self): """ Mount the VirtIO device in the simulated system. """ self.logger.info("Mounting VirtIO device in simulated system") self.gem5_shell('mkdir -p /mnt/obb') mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 /mnt/obb".format(self.temp_dir) self.gem5_shell(mount_command) def deploy_m5(self, context, force=False): """ Deploys the m5 binary to the device and returns the path to the binary on the device. :param force: by default, if the binary is already present on the device, it will not be deployed again. Setting force to ``True`` overrides that behaviour and ensures that the binary is always copied. Defaults to ``False``. :returns: The on-device path to the m5 binary. """ on_device_executable = self.path.join(self.binaries_directory, 'm5') if not force and self.file_exists(on_device_executable): # We want to check the version of the binary. We cannot directly # check this because the m5 binary itself is unversioned. We also # need to make sure not to check the error code as "m5 --help" # returns a non-zero error code. 
output = self.gem5_shell('m5 --help', check_exit_code=False) if "writefile" in output: self.logger.debug("Using the m5 binary on the device...") self.m5_path = on_device_executable return on_device_executable else: self.logger.debug("m5 on device does not support writefile!") host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'm5')) self.logger.info("Installing the m5 binary to the device...") self.m5_path = self.install(host_file) return self.m5_path
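# ---------------------------------------------------------------------------
# Hedged sketch of an OS-specific subclass (illustrative, not the shipped
# implementation; the import path and class name are assumptions). Per the
# class docstring, BaseGem5Device must come first in the MRO so that its
# overrides win over the LinuxDevice ones.
# ---------------------------------------------------------------------------
from wlauto import LinuxDevice


class Gem5LinuxDevice(BaseGem5Device, LinuxDevice):

    name = 'gem5_linux'

    def __init__(self, **kwargs):
        # Initialise the OS-specific device first, then layer the
        # gem5-specific state on top.
        LinuxDevice.__init__(self, **kwargs)
        BaseGem5Device.__init__(self)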
# ABC Parser for ABC Music Notation Files from __future__ import division import re import string import math from Preprocess import globalConstant class TuneBook(object): """ Represents a tunebook with tunes and free text. Properties ---------- text An array of free text blocks, as strings. tune An array of tunes, as Tune objects. """ def __init__(self, filename=None): """ Creates a TuneBook object. If a filename is given, the file is opened and parsed. If an invalid filename is given, throws IOError. """ self.text = [] # array of text blocks as strings self.tune = [] # array of tunes as Tune if filename: f = open(filename, 'Ur') self.parse(f.read()) f.close() def parse(self, str): """ Parses the given input. """ for lines in str.split('\n\n'): if 'x:' in lines.lower(): tune = Tune() tune.parse(lines) self.tune.append(tune) else: self.text.append(lines) ############################################################################## class Tune(object): """ Represents an entire tune with information fields and music. Properties ---------- text An array of the lines of the tune, as strings. line An array of the lines of the tune, as Line objects (see below). """ def __init__(self, filename=None): """ Creates a Tune object. If a filename is given, the file is opened and parsed.If an invalid filename is given, throws IOError. """ self._fields = {} # information fields self.text = [] # array of tune lines as strings self.line = [] # array of tune lines as Line if filename: f = open(filename, 'Ur') self.parse(f.read()) f.close() def field(self, field): """ Returns an information field (e.g., "T", "X"), or None if the given field doesn't exist. """ if field in self._fields: return self._fields[field] else: return None def parse(self, str): """ Parses the given input ABC string. """ lineBuffer = '' lines = str.split('\n') for line in lines: # Strip superfluous characters. line = re.sub('%.*$', '', line) # Strip comments. line = line.lstrip().rstrip() # Strip whitespace. # Ignore blank lines. if len(line) == 0: continue # If the lines begins with a letter and a colon, it's an information # field. Extract it. matches = re.match('([A-Za-z]):\s*(.*)', line) if matches: #(0) matches the whole regular expression. #(1) matches the first pattern. #(2) matches the second pattern,etc. self._parseInformationField(matches.group(1), matches.group(2)) else: # We have a tune line. if line[-1] == "\\": # The current line ends with a \, so just store it in the buffer # for now. lineBuffer += line.rstrip("\\") else: # The current line does not end with a \, so add it to whatever # lines we might have seen previously and parse it. lineBuffer += line self.text.append(lineBuffer) # Store raw tune line. self.line.append(Line(lineBuffer)) lineBuffer = '' def _parseInformationField(self, field, data): # Parses an information field. field is a letter, while data is the # data following the field identifier. field is converted to uppercase # before storage. Only the first occurrence of the field is stored. field = field.upper() if field not in self._fields: self._fields[field] = data def getFields(self): return self._fields ############################################################################## class Line(object): """ Represents one line in a tune. Properties ---------- text The raw text that was parsed. measure An array of Measure objects representing the individual measures within the line. """ def __init__(self, line=None): """ Takes a text line and parses it. 
""" self.text = None # raw text of the line self.measure = [] # array of Measures if line: self.parse(line) def parse(self, line): """ Parses a line of ABC. """ self.__init__() self.text = line # Split the line into measures. Measure symbols are # |, |], ||, [|, |:, :|, :: measures = re.split('\||\|]|\|\||\[\||\|:|:\||::', line) # Remove empty measures (typically at the end of lines). for item in measures: if len(item.lstrip().rstrip()) == 0: measures.remove(item) self.measure = [] # array of Measure objects for measure in measures: newMeasure = Measure() newMeasure.parse(measure) self.measure.append(newMeasure) def __str__(self): return self.text ############################################################################## class Measure(object): """ Represents one measure of a line of music. Properties ---------- text The raw text of the measure that was parsed. item An array of MusicItem objects representing the individual items (notes and chords) within this measure. repeat The repeat number for this measure, or None if there is no repeat. This only simply repeats, e.g., [1 and [2 """ def __init__(self): """ Constructor. Builds an empty Measure object. """ self._reset() def parse(self, text): """ Parses a string of ABC into Notes and Chords. """ self._reset() self.text = text match = re.search('\[([12])', self.text) if match: # First or second repeat. self.repeat = int(match.group(1)) self._pos += len(match.group(0)) while self._pos < len(self.text): if self.text[self._pos].isspace(): # Ignore whitespace. self._pos += 1 elif self.text[self._pos] == '"': # Parse a chord. self._parseChord() elif self.text[self._pos] in "^=_" or self.text[self._pos].isalpha() or self.text[self._pos] == '#': # Found the start of a note. self._parseNote() else: # Skip over anything we don't recognize. self._pos += 1 def _parseChord(self): # Parses a chord. newChord = Chord() chordText = newChord.parse(self.text[self._pos:]) newChord.beat = self._beat self._beat += newChord.duration self.item.append(newChord) self._pos += len(chordText) + 2 # add 2 to account for the double quotes def _parseNote(self): # Parses a note. newNote = Note() noteText, temp1, temp2, temp3 = newNote.parse(self.text[self._pos:]) newNote.beat = self._beat self._beat += newNote.duration self.item.append(newNote) self._pos += len(noteText) def _reset(self): # Clears out all data. self.item = [] # array of Chords and Notes for this measure self.text = None # raw text of the measure self._pos = 0 # parsing position within the measure self.repeat = None # repeat number (1 or 2) self._beat = 1 # current beat (while parsing) def __str__(self): return self.text ############################################################################## class MusicItem(object): """ Abstract base class for "things" that appear in a line of music: notes and chords. Properties ---------- duration Length of this item as a float, e.g., 0.25, 1, etc. beat The beat on which this item occurs (float). Starts at 1. text The raw text of this item. """ def __init__(self): # Duration of the item as a float, e.g,. 1/4, 1/8, 1/16, 2 self.duration = 0.0 # The beat on which this item occurs: 0, 1, 2, etc. self.beat = 0.0 # Raw text from the tune that makes up this item. self.text = '' def __str__(self): return self.text ############################################################################## class Chord(MusicItem): """ Represents a chord. """ def __init__(self): super(Chord, self).__init__() def parse(self, str): """ Parses a chord out of the given string. 
Returns the raw text that was parsed from str without the surrounding double quotes. """ pos = 0 if pos < len(str) and str[pos] == '"': self.text += str[pos] pos += 1 else: raise RuntimeError('Chord does not begin with ".' + str) while pos < len(str) and str[pos] != '"': self.text += str[pos] pos += 1 if pos < len(str) and str[pos] == '"': self.text += str[pos] pos += 1 else: raise RuntimeError('Chord does not end with ":' + str) # Remove surrounding double quotes. self.text = self.text[1:-1] return self.text ############################################################################## #get duration information class Note(MusicItem): """ Represents a note. Properties ---------- prefix Optional ^, =, or _ note The note character itself, A, B, etc. suffix Optional ' or , length Optional note length, /4, 2, etc. """ def __init__(self): super(Note, self).__init__() self.prefix = None # optional ^, =, or _ self.note = None # note character [A-z] self.suffix = None # optional ' or , self.length = None # optional length indication self.nextNoteDurationPlus = 0.0 # the value that the next note take away, when the current note has < or > self.nextNoteDurationFlag = False # whether the next note takes away the value or not def parse(self, str, nextNoteDurationPlus = 0.0, nextNoteDurationFlag = False): """ Parses a note out of the given string. Returns the raw text that was parsed from str. """ self.__init__() pos = 0 if str == '#ending': self.text = '#ending' self.duration = 0 self.nextNoteDurationPlus = 0.0 self.nextNoteDurationFlag = False return self.text, self.duration , self.nextNoteDurationPlus, self.nextNoteDurationFlag if pos < len(str) and str[pos] in "^=_": # Sharp, natural, or flat symbol. self.text += str[pos] self.prefix = str[pos] pos += 1 if pos < len(str) and str[pos].isalpha(): # Note letter. self.text += str[pos] self.note = str[pos] pos += 1 else: raise RuntimeError('Note does not contain a character: ' + str.__str__()) if pos < len(str) and str[pos] in "',": # Note raise or lower an octave. self.text += str[pos] self.suffix = str[pos] pos += 1 while pos < len(str) and str[pos] in "/0123456789><": # Note length. self.text += str[pos] if not self.length: self.length = "" self.length += str[pos] pos += 1 #turn the note length(string) into a duration(float). #given that all data is valid slash_count = self.length.__str__().count('/') #this dotted-note notation is only defined between two notes of equal length. 
#attention: two notes which are of equal length left_count = self.length.__str__().count('<') right_count = self.length.__str__().count('>') self.nextNoteDurationFlag = nextNoteDurationFlag self.nextNoteDurationPlus = nextNoteDurationPlus #print(self.length) #if it is just a sigle note if self.length is None: #if the previous note has < or > suffix if self.nextNoteDurationFlag == True: self.duration = globalConstant.nextNoteDurationBase + self.nextNoteDurationPlus #print(self.duration) #if it does not have else: self.duration = globalConstant.nextNoteDurationBase #print(self.duration) self.nextNoteDurationPlus = 0.0 self.nextNoteDurationFlag = False #if it is a sigle note followed by a number elif slash_count ==0 and left_count ==0 and right_count ==0: #and if the previous note have < or > if self.nextNoteDurationFlag: self.duration = float(re.match('[0-9]', self.length).group(0)) + self.nextNoteDurationPlus #or it does not have < and > else: self.duration = float(re.match('[0-9]', self.length).group(0)) self.nextNoteDurationPlus = 0.0 self.nextNoteDurationFlag = False else: #if it has a / if slash_count == 1: #if it has only a /, without any number if re.search('[0-9]', self.length) == None: #if the previous note has < or > if self.nextNoteDurationFlag == True: self.duration = 1/2 + self.nextNoteDurationPlus else: self.duration = 1/2 #or if it has a / with numbers else: nums = re.findall('[0-9]', self.length) #if it has two number if len(nums) == 2: #if the previous note has < or > if self.nextNoteDurationFlag == True: self.duration = eval(re.match('[0-9]/[0-9]', self.length).group(0)) + self.nextNoteDurationPlus else: self.duration = eval(re.match('[0-9]/[0-9]', self.length).group(0)) #if it has only one number elif len(nums) == 1: #if the case is like /3, it means 1/3 if re.search('[0-9]/', self.length) == None: #if the previous note has < or > if self.nextNoteDurationFlag == True: #self.duration = eval('1/' + re.search('/[0-9]', self.length).group(0)) + _nextNoteDurationPlus self.duration = eval('1/' + nums[0]) + self.nextNoteDurationPlus #if it does not have < and > else: #self.duration = eval('1' + re.search('/[0-9]', self.length).group(0)) self.duration = eval('1/' + nums[0]) ##if the case is like 3/, it means 3/2 else: if self.nextNoteDurationFlag == True: self.duration = eval(nums[0] + '/2') + self.nextNoteDurationPlus else: self.duration = eval(nums[0] + '/2') #if it has more than one / elif slash_count > 1: if self.nextNoteDurationFlag == True: self.duration = globalConstant.nextNoteDurationBase / math.pow(2, slash_count) + self.nextNoteDurationPlus else: self.duration = globalConstant.nextNoteDurationBase / math.pow(2, slash_count) #if it has no / else: # if it has also no number if re.search('[0-9]', self.length) == None: #if the previous note has < or > if self.nextNoteDurationFlag == True: self.duration = globalConstant.nextNoteDurationBase +self.nextNoteDurationPlus #print(self.duration) #if the previous note does not have else: self.duration = globalConstant.nextNoteDurationBase #or if also have one number else: if self.nextNoteDurationFlag == True: self.duration = float(re.search('[0-9]', self.length).group(0)) + self.nextNoteDurationPlus # if the previous note does not have < and > else: self.duration = float(re.search('[0-9]', self.length).group(0)) #if it also has < if left_count != 0: takeaway_part = self.duration / math.pow(2, left_count) self.duration = takeaway_part self.nextNoteDurationFlag = True self.nextNoteDurationPlus = takeaway_part #or if it also has > elif 
right_count != 0: takeaway_part = self.duration / math.pow(2, right_count) self.duration = self.duration + takeaway_part self.nextNoteDurationFlag = True self.nextNoteDurationPlus = -(takeaway_part) # if it has no < and > else: self.nextNoteDurationFlag = False self.nextNoteDurationPlus = 0.0 return self.text, self.duration , self.nextNoteDurationPlus, self.nextNoteDurationFlag
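# --- Hedged illustration of the duration rules implemented above ---
# The parser maps a note's length suffix to a duration: a bare note gets the
# base duration, a digit multiplies it, each '/' halves it, 'n/m' is read as a
# fraction, and '<' / '>' move half of a note's duration to or from the note
# that follows via nextNoteDurationFlag / nextNoteDurationPlus. The sketch
# below reimplements only the '<' / '>' carry rule for illustration; the base
# duration of 1.0 stands in for globalConstant.nextNoteDurationBase, which is
# defined elsewhere.
import math

def carry_durations(suffixes, base=1.0):
    """Return durations for notes whose suffixes may contain '<' or '>'."""
    durations = []
    carry = 0.0       # plays the role of nextNoteDurationPlus
    pending = False   # plays the role of nextNoteDurationFlag
    for suffix in suffixes:
        duration = base + carry if pending else base
        carry, pending = 0.0, False
        left, right = suffix.count('<'), suffix.count('>')
        if left:
            part = duration / math.pow(2, left)
            duration = part                    # this note is shortened ...
            carry, pending = part, True        # ... and the next one lengthened
        elif right:
            part = duration / math.pow(2, right)
            duration = duration + part         # this note is lengthened ...
            carry, pending = -part, True       # ... and the next one shortened
        durations.append(duration)
    return durations

if __name__ == '__main__':
    print(carry_durations(['<', '']))   # [0.5, 1.5]
    print(carry_durations(['>', '']))   # [1.5, 0.5]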
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ import json import random from frappe.model.document import Document from six import iteritems class DesktopIcon(Document): def validate(self): if not self.label: self.label = self.module_name def on_trash(self): clear_desktop_icons_cache() def after_doctype_insert(): frappe.db.add_unique('Desktop Icon', ('module_name', 'owner', 'standard')) def get_desktop_icons(user=None): '''Return desktop icons for user''' if not user: user = frappe.session.user user_icons = frappe.cache().hget('desktop_icons', user) if not user_icons: fields = ['module_name', 'hidden', 'label', 'link', 'type', 'icon', 'color', '_doctype', '_report', 'idx', 'force_show', 'reverse', 'custom', 'standard', 'blocked'] active_domains = frappe.get_active_domains() blocked_doctypes = frappe.get_all("DocType", filters={ "ifnull(restrict_to_domain, '')": ("not in", ",".join(active_domains)) }, fields=["name"]) blocked_doctypes = [ d.get("name") for d in blocked_doctypes ] standard_icons = frappe.db.get_all('Desktop Icon', fields=fields, filters={'standard': 1}) standard_map = {} for icon in standard_icons: if icon._doctype in blocked_doctypes: icon.blocked = 1 standard_map[icon.module_name] = icon user_icons = frappe.db.get_all('Desktop Icon', fields=fields, filters={'standard': 0, 'owner': user}) # update hidden property for icon in user_icons: standard_icon = standard_map.get(icon.module_name, None) if icon._doctype in blocked_doctypes: icon.blocked = 1 # override properties from standard icon if standard_icon: for key in ('route', 'label', 'color', 'icon', 'link'): if standard_icon.get(key): icon[key] = standard_icon.get(key) if standard_icon.blocked: icon.hidden = 1 # flag for modules_setup page icon.hidden_in_standard = 1 elif standard_icon.force_show: icon.hidden = 0 # add missing standard icons (added via new install apps?) user_icon_names = [icon.module_name for icon in user_icons] for standard_icon in standard_icons: if standard_icon.module_name not in user_icon_names: # if blocked, hidden too! 
if standard_icon.blocked: standard_icon.hidden = 1 standard_icon.hidden_in_standard = 1 user_icons.append(standard_icon) user_blocked_modules = frappe.get_doc('User', user).get_blocked_modules() for icon in user_icons: if icon.module_name in user_blocked_modules: icon.hidden = 1 # sort by idx user_icons.sort(lambda a, b: 1 if a.idx > b.idx else -1) # translate for d in user_icons: if d.label: d.label = _(d.label) frappe.cache().hset('desktop_icons', user, user_icons) return user_icons @frappe.whitelist() def add_user_icon(_doctype, _report=None, label=None, link=None, type='link', standard=0): '''Add a new user desktop icon to the desktop''' if not label: label = _doctype or _report if not link: link = 'List/{0}'.format(_doctype) # find if a standard icon exists icon_name = frappe.db.exists('Desktop Icon', {'standard': standard, 'link': link, 'owner': frappe.session.user}) if icon_name: if frappe.db.get_value('Desktop Icon', icon_name, 'hidden'): # if it is hidden, unhide it frappe.db.set_value('Desktop Icon', icon_name, 'hidden', 0) clear_desktop_icons_cache() else: idx = frappe.db.sql('select max(idx) from `tabDesktop Icon` where owner=%s', frappe.session.user)[0][0] or \ frappe.db.sql('select count(*) from `tabDesktop Icon` where standard=1')[0][0] module = frappe.db.get_value('DocType', _doctype, 'module') module_icon = frappe.get_value('Desktop Icon', {'standard':1, 'module_name':module}, ['name', 'icon', 'color', 'reverse'], as_dict=True) if not frappe.db.get_value("Report", _report): _report = None if not module_icon: module_icon = frappe._dict() opts = random.choice(palette) module_icon.color = opts[0] module_icon.reverse = 0 if (len(opts) > 1) else 1 try: new_icon = frappe.get_doc({ 'doctype': 'Desktop Icon', 'label': label, 'module_name': label, 'link': link, 'type': type, '_doctype': _doctype, '_report': _report, 'icon': module_icon.icon, 'color': module_icon.color, 'reverse': module_icon.reverse, 'idx': idx + 1, 'custom': 1, 'standard': standard }).insert(ignore_permissions=True) clear_desktop_icons_cache() icon_name = new_icon.name except frappe.UniqueValidationError as e: frappe.throw(_('Desktop Icon already exists')) except Exception as e: raise e return icon_name @frappe.whitelist() def set_order(new_order, user=None): '''set new order by duplicating user icons (if user is set) or set global order''' if isinstance(new_order, basestring): new_order = json.loads(new_order) for i, module_name in enumerate(new_order): if module_name not in ('Explore',): if user: icon = get_user_copy(module_name, user) else: name = frappe.db.get_value('Desktop Icon', {'standard': 1, 'module_name': module_name}) if name: icon = frappe.get_doc('Desktop Icon', name) else: # standard icon missing, create one for DocType name = add_user_icon(module_name, standard=1) icon = frappe.get_doc('Desktop Icon', name) icon.db_set('idx', i) clear_desktop_icons_cache() def set_desktop_icons(visible_list, ignore_duplicate=True): '''Resets all lists and makes only the given one standard, if the desktop icon does not exist and the name is a DocType, then will create an icon for the doctype''' # clear all custom frappe.db.sql('delete from `tabDesktop Icon` where standard=0') # set all as blocked frappe.db.sql('update `tabDesktop Icon` set blocked=0, hidden=1') # set as visible if present, or add icon for module_name in visible_list: name = frappe.db.get_value('Desktop Icon', {'module_name': module_name}) if name: frappe.db.set_value('Desktop Icon', name, 'hidden', 0) else: if frappe.db.exists('DocType', 
module_name): try: add_user_icon(module_name, standard=1) except frappe.UniqueValidationError as e: if not ignore_duplicate: raise e else: visible_list.remove(module_name) if frappe.message_log: frappe.message_log.pop() # set the order set_order(visible_list) clear_desktop_icons_cache() def set_hidden_list(hidden_list, user=None): '''Sets property `hidden`=1 in **Desktop Icon** for given user. If user is None then it will set global values. It will also set the rest of the icons as shown (`hidden` = 0)''' if isinstance(hidden_list, basestring): hidden_list = json.loads(hidden_list) # set as hidden for module_name in hidden_list: set_hidden(module_name, user, 1) # set as seen for module_name in list(set(get_all_icons()) - set(hidden_list)): set_hidden(module_name, user, 0) if user: clear_desktop_icons_cache() else: frappe.clear_cache() def set_hidden(module_name, user=None, hidden=1): '''Set module hidden property for given user. If user is not specified, hide/unhide it globally''' if user: icon = get_user_copy(module_name, user) if hidden and icon.custom: frappe.delete_doc(icon.doctype, icon.name, ignore_permissions=True) return # hidden by user icon.db_set('hidden', hidden) else: icon = frappe.get_doc('Desktop Icon', {'standard': 1, 'module_name': module_name}) # blocked is globally hidden icon.db_set('blocked', hidden) def get_all_icons(): return [d.module_name for d in frappe.get_all('Desktop Icon', filters={'standard': 1}, fields=['module_name'])] def clear_desktop_icons_cache(user=None): frappe.cache().hdel('desktop_icons', user or frappe.session.user) frappe.cache().hdel('bootinfo', user or frappe.session.user) def get_user_copy(module_name, user=None): '''Return user copy (Desktop Icon) of the given module_name. If user copy does not exist, create one. :param module_name: Name of the module :param user: User for which the copy is required (optional) ''' if not user: user = frappe.session.user desktop_icon_name = frappe.db.get_value('Desktop Icon', {'module_name': module_name, 'owner': user, 'standard': 0}) if desktop_icon_name: return frappe.get_doc('Desktop Icon', desktop_icon_name) else: return make_user_copy(module_name, user) def make_user_copy(module_name, user): '''Insert and return the user copy of a standard Desktop Icon''' standard_name = frappe.db.get_value('Desktop Icon', {'module_name': module_name, 'standard': 1}) if not standard_name: frappe.throw(_('{0} not found').format(module_name), frappe.DoesNotExistError) original = frappe.get_doc('Desktop Icon', standard_name) desktop_icon = frappe.get_doc({ 'doctype': 'Desktop Icon', 'standard': 0, 'owner': user, 'module_name': module_name }) for key in ('app', 'label', 'route', 'type', '_doctype', 'idx', 'reverse', 'force_show'): if original.get(key): desktop_icon.set(key, original.get(key)) desktop_icon.insert(ignore_permissions=True) return desktop_icon def sync_desktop_icons(): '''Sync desktop icons from all apps''' for app in frappe.get_installed_apps(): sync_from_app(app) def sync_from_app(app): '''Sync desktop icons from app. 
To be called during install''' try: modules = frappe.get_attr(app + '.config.desktop.get_data')() or {} except ImportError: return [] if isinstance(modules, dict): modules_list = [] for m, desktop_icon in iteritems(modules): desktop_icon['module_name'] = m modules_list.append(desktop_icon) else: modules_list = modules for i, m in enumerate(modules_list): desktop_icon_name = frappe.db.get_value('Desktop Icon', {'module_name': m['module_name'], 'app': app, 'standard': 1}) if desktop_icon_name: desktop_icon = frappe.get_doc('Desktop Icon', desktop_icon_name) else: # new icon desktop_icon = frappe.get_doc({ 'doctype': 'Desktop Icon', 'idx': i, 'standard': 1, 'app': app, 'owner': 'Administrator' }) if 'doctype' in m: m['_doctype'] = m.pop('doctype') desktop_icon.update(m) desktop_icon.save() return modules_list palette = ( ('#FFC4C4',), ('#FFE8CD',), ('#FFD2C2',), ('#FF8989',), ('#FFD19C',), ('#FFA685',), ('#FF4D4D', 1), ('#FFB868',), ('#FF7846', 1), ('#A83333', 1), ('#A87945', 1), ('#A84F2E', 1), ('#D2D2FF',), ('#F8D4F8',), ('#DAC7FF',), ('#A3A3FF',), ('#F3AAF0',), ('#B592FF',), ('#7575FF', 1), ('#EC7DEA', 1), ('#8E58FF', 1), ('#4D4DA8', 1), ('#934F92', 1), ('#5E3AA8', 1), ('#EBF8CC',), ('#FFD7D7',), ('#D2F8ED',), ('#D9F399',), ('#FFB1B1',), ('#A4F3DD',), ('#C5EC63',), ('#FF8989', 1), ('#77ECCA',), ('#7B933D', 1), ('#A85B5B', 1), ('#49937E', 1), ('#FFFACD',), ('#D2F1FF',), ('#CEF6D1',), ('#FFF69C',), ('#A6E4FF',), ('#9DECA2',), ('#FFF168',), ('#78D6FF',), ('#6BE273',), ('#A89F45', 1), ('#4F8EA8', 1), ('#428B46', 1) )
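# --- Hedged usage sketch for the desktop icon helpers defined above ---
# The import path assumes the standard Frappe layout for this doctype module,
# and the doctype / module names ('ToDo', 'Note', 'Accounts', 'Stock') are
# purely illustrative. A live Frappe site context is required for this to run.
import frappe
from frappe.desk.doctype.desktop_icon.desktop_icon import (
    add_user_icon, get_desktop_icons, set_desktop_icons, set_hidden_list)

def demo_desktop_icons():
    # add a per-user shortcut to the ToDo list view for the session user
    icon_name = add_user_icon('ToDo')
    # hide two modules for the current user only
    set_hidden_list(['Accounts', 'Stock'], user=frappe.session.user)
    # reset the global desktop so that only these modules are visible
    set_desktop_icons(['ToDo', 'Note'])
    # icons are returned from the per-user cache, merged with standard icons
    return icon_name, get_desktop_icons(frappe.session.user)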
import os import matplotlib.pyplot as plt def save(path, ext='png', close=True, verbose=True): """Save a figure from pyplot. Parameters ---------- path : string The path (and filename, without the extension) to save the figure to. ext : string (default='png') The file extension. This must be supported by the active matplotlib backend (see matplotlib.backends module). Most backends support 'png', 'pdf', 'ps', 'eps', and 'svg'. close : boolean (default=True) Whether to close the figure after saving. If you want to save the figure multiple times (e.g., to multiple formats), you should NOT close it in between saves or you will have to re-plot it. verbose : boolean (default=True) Whether to print information about when and where the image has been saved. """ # Extract the directory and filename from the given path directory = os.path.split(path)[0] filename = "%s.%s" % (os.path.split(path)[1], ext) if directory == '': directory = '.' # If the directory does not exist, create it if not os.path.exists(directory): os.makedirs(directory) # The final path to save to savepath = os.path.join(directory, filename) if verbose: print("Saving figure to '%s'..." % savepath), # Actually save the figure plt.savefig(savepath,bbox_inches='tight') # Close it if close: plt.close() if verbose: print("Done") def get_object_index(morphline,morphnum): furniture_axes = ['bedChair', 'bedTable', 'benchBed', 'chairBench', 'chairTable', 'tableBench'] car_axes = ['limoToSUV','limoToSedan','limoToSmart','smartToSedan','suvToSedan','suvToSmart'] furniture_items = ['bed','bench','chair','table'] car_items = ['limo','sedan','smartcar','SUV'] endpoints = mdr_helpers.getEndpoints(morphline) morphnum = float(morphnum) whichEndpoint = int(np.round(morphnum/100)) thing = endpoints[whichEndpoint] if morphline in furniture_axes: return furniture_items.index(thing)+1 elif morphline in car_axes: return car_items.index(thing)+1 def getEndpoints(morphline): if morphline=='sedanMinivan': return ['sedan','minivan'] elif morphline=='minivanSportscar': return ['minivan','sportscar'] elif morphline=='sportscarSUV': return ['sportscar','SUV'] elif morphline=='SUVMinivan': return ['SUV','minivan'] elif morphline=='sportscarSedan': return ['sportscar','sedan'] elif morphline=='sedanSUV': return ['sedan','SUV'] elif morphline=='bedChair': return ['bed','chair'] elif morphline=='bedTable': return ['bed','table'] elif morphline=='benchBed': return ['bench','bed'] elif morphline=='chairBench': return ['chair','bench'] elif morphline=='chairTable': return ['chair','table'] elif morphline=='tableBench': return ['table','bench'] elif morphline=='limoToSUV': return ['limo','SUV'] elif morphline=='limoToSedan': return ['sedan','limo'] elif morphline=='limoToSmart': return ['limo','smartcar'] elif morphline=='smartToSedan': return ['smartcar','sedan'] elif morphline=='suvToSedan': return ['SUV','sedan'] elif morphline=='suvToSmart': return ['SUV','smartcar'] else: return ['A','B'] def triple_sum(X): return sum(sum(sum(X))) def get_mask_array(mask_path): mask_img = image.load_img(mask_path) mask_data = mask_img.get_data() num_brain_voxels = sum(sum(sum(mask_data==1))) return mask_data, num_brain_voxels def load_roi_mask(subj,run_num,roi): mask_path = proj_dir + subj +'/analysis/firstlevel/rois/' + roi + '_func__' + str(run_num) + '_binarized.nii.gz' mask_data, nv = get_mask_array(mask_path) return mask_data def load_roi_mask_combined(subj,run_num,roi): if run_num in [1,2]: phase_num = '12' elif run_num in [3,4]: phase_num = '34' elif run_num in [5,6]: 
phase_num = '56' mask_path = proj_dir + '/' + subj +'/analysis/firstlevel/rois/' + roi + '_func_combined_' + phase_num + '_binarized.nii.gz' mask_data, nv = get_mask_array(mask_path) return mask_data def normalize(X): mn = X.mean(0) sd = X.std(0) X = X - mn X = X / np.maximum(sd, 1e-5) return X def load_single_run_weights(subj,run_num,cope_num): nifti_path = proj_dir + '/' + subj + '/analysis/firstlevel/glm4_recognition_run_' + str(run_num) + \ '.feat/stats/' + 'cope' + str(cope_num) + '.nii.gz' fmri_img = image.load_img(nifti_path) fmri_data = fmri_img.get_data() return fmri_data def apply_mask(data,mask): return data[mask==1] def load_data_and_apply_mask(subj,run_num,roi,cope_num): mask = load_roi_mask_combined(subj,run_num,roi) vol = load_single_run_weights(subj,run_num,cope_num) vec = apply_mask(vol,mask) return vec def extract_obj_by_voxel_run_mat(this_sub,run_num, roi): cope1 = load_data_and_apply_mask(this_sub,run_num,roi,1) cope2 = load_data_and_apply_mask(this_sub,run_num,roi,2) cope3 = load_data_and_apply_mask(this_sub,run_num,roi,3) cope4 = load_data_and_apply_mask(this_sub,run_num,roi,4) return np.vstack((cope1,cope2,cope3,cope4)) def plot_phase_RSM(this_sub,roi,phase): ''' e.g., plot_phase_RSM(this_sub,'fusiform','pre') ''' if phase=='pre': mat1 = extract_obj_by_voxel_run_mat(this_sub,3,roi) mat2 = extract_obj_by_voxel_run_mat(this_sub,4,roi) elif phase=='post': mat1 = extract_obj_by_voxel_run_mat(this_sub,5,roi) mat2 = extract_obj_by_voxel_run_mat(this_sub,6,roi) stacked = np.vstack((mat1,mat2)) plt.matshow(np.corrcoef(stacked)) plt.colorbar() def extract_condition_by_voxel_run_mat(this_sub,run_num, roi): w = this_sub these = coll.find({'wID': w}).sort('trialNum') versionNum = these[0]['versionNum'] design = [i for i in mdtd if i['version'] == int(versionNum)] # find which axes belong to which condition trained = design[0]['trained'] near = design[0]['near'] far1 = design[0]['far1'] far2 = design[0]['far2'] Tep = getEndpoints(trained) Nep = getEndpoints(near) condorder = Tep + Nep slot1 = load_data_and_apply_mask(this_sub,run_num,roi,obj2cope[condorder[0]]) slot2 = load_data_and_apply_mask(this_sub,run_num,roi,obj2cope[condorder[1]]) slot3 = load_data_and_apply_mask(this_sub,run_num,roi,obj2cope[condorder[2]]) slot4 = load_data_and_apply_mask(this_sub,run_num,roi,obj2cope[condorder[3]]) return np.vstack((slot1,slot2,slot3,slot4)) def remove_nans(array): return array[~np.isnan(array)] def rmse(a): return np.sqrt(np.mean(map(np.square,a))) def betwitdist(a,b,ab): return ab/np.sqrt(0.5*(a**2+b**2)) def norm_hist(data,bins): weights = np.ones_like(data)/float(len(data)) plt.hist(data, bins=bins, weights=weights) def compare_btw_wit_obj_similarity_across_runs(this_sub,phase,roi): if phase=='pre': mat1 = extract_obj_by_voxel_run_mat(this_sub,3,roi) mat2 = extract_obj_by_voxel_run_mat(this_sub,4,roi) elif phase=='post': mat1 = extract_obj_by_voxel_run_mat(this_sub,5,roi) mat2 = extract_obj_by_voxel_run_mat(this_sub,6,roi) fAB = np.vstack((mat1,mat2)) # stack feature matrices DAB = sklearn.metrics.pairwise.pairwise_distances(fAB, metric='correlation') # square matrix, where off-diagblock is distances *between* fA and fB vectors offblock = DAB[:len(mat1),range(len(mat1),shape(DAB)[1])] wit_obj = DAB[:len(mat1),range(len(mat1),shape(DAB)[1])].diagonal() btw_obj = np.hstack((offblock[np.triu_indices(shape(offblock)[0],k=1)],offblock[np.tril_indices(shape(offblock)[0],k=-1)])) wit_mean = wit_obj.mean() btw_mean = btw_obj.mean() return wit_mean,btw_mean def 
compare_btw_wit_cond_similarity_across_runs(this_sub,phase,roi): if phase=='pre': mat1 = extract_condition_by_voxel_run_mat(this_sub,3,roi) mat2 = extract_condition_by_voxel_run_mat(this_sub,4,roi) elif phase=='post': mat1 = extract_condition_by_voxel_run_mat(this_sub,5,roi) mat2 = extract_condition_by_voxel_run_mat(this_sub,6,roi) fAB = np.vstack((mat1,mat2)) # stack feature matrices DAB = sklearn.metrics.pairwise.pairwise_distances(fAB, metric='correlation') # square matrix, where off-diagblock is distances *between* fA and fB vectors offblock = DAB[:len(mat1),range(len(mat1),shape(DAB)[1])] trained_witobj = offblock.diagonal()[:2] control_witobj = offblock.diagonal()[2:] trained_btwobj = np.array([offblock[:2,:2][0,1], offblock[:2,:2][1,0]]) control_btwobj = np.array([offblock[2:,2:][0,1],offblock[2:,2:][1,0]]) trawit_mean = trained_witobj.mean() conwit_mean = control_witobj.mean() trabtw_mean = trained_btwobj.mean() conbtw_mean = control_btwobj.mean() return trawit_mean,conwit_mean,trabtw_mean,conbtw_mean
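# --- Toy illustration of the within/between split computed above ---
# compare_btw_wit_obj_similarity_across_runs stacks two run-wise object-by-voxel
# matrices, computes a correlation-distance matrix, and then reads within-object
# distances off the diagonal of the cross-run block and between-object distances
# off its remaining entries. The sketch below repeats that slicing on random
# data (4 objects, 20 voxels, both hypothetical) so the indexing is explicit.
import numpy as np
import sklearn.metrics

rng = np.random.RandomState(0)
n_objects, n_voxels = 4, 20
mat1 = rng.randn(n_objects, n_voxels)   # run A: object x voxel patterns
mat2 = rng.randn(n_objects, n_voxels)   # run B: object x voxel patterns

fAB = np.vstack((mat1, mat2))
DAB = sklearn.metrics.pairwise.pairwise_distances(fAB, metric='correlation')

offblock = DAB[:n_objects, n_objects:]   # distances between run-A and run-B patterns

wit_obj = offblock.diagonal()            # same object compared across runs
btw_obj = np.hstack((offblock[np.triu_indices(n_objects, k=1)],
                     offblock[np.tril_indices(n_objects, k=-1)]))

print('within-object mean distance: %.3f' % wit_obj.mean())
print('between-object mean distance: %.3f' % btw_obj.mean())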
# Copyright (c) Charl P. Botha, TU Delft. # All rights reserved. # See COPYRIGHT for details. """ """ import mutex ######################################################################### class SchedulerException(Exception): pass class CyclesDetectedException(SchedulerException): pass ######################################################################### class SchedulerModuleWrapper: """Wrapper class that adapts module instance to scheduler-usable object. We can use this to handle exceptions, such as the viewer split. Module instances are wrapped on an ad hoc basis, so you CAN'T use equality testing or 'in' tests to check for matches. Use the L{matches} method. @ivar instance: the module instance, e.g. instance of child of ModuleBase @ivar input_independent_part: part of module that is not input dependent, e.g. in the case of purely interaction-dependent outputs @ivar input_independent_outputs: list of outputs that are input-dependent. This has to be set for both dependent and independent parts of a module. @todo: functionality in this class has been reduced to such an extent that we should throw it OUT in favour of just working with (meta_module, part) tuples. These we CAN use for hashing and equality tests. @author: Charl P. Botha <http://cpbotha.net/> """ def __init__(self, meta_module, part): self.meta_module = meta_module self.part = part def matches(self, otherModule): """Checks if two schedulerModules are equivalent. Module instances are wrapped with this class on an ad hoc basis, so you can not check for equivalency with the equality or 'in' operators for example. Use this method instead. @param otherModule: module with which equivalency should be tested. @return: True if equivalent, False otherwise. """ eq = self.meta_module == otherModule.meta_module and \ self.part == otherModule.part return eq ######################################################################### class Scheduler: """Coordinates event-driven network execution. DeVIDE currently supports two main scheduling modes: event-driven and demand-driven. [1] contains a concise overview of the scheduling approach, but we'll go into some more detail in this in-code documentation. Event-driven scheduling: This is the default scheduling mode - the network is analysed and all modules are iterated through in topological order. For each module, its inputs are transferred from its producer modules if necessary (i.e. a producer module has been executed since the previous transfer, or this (consumer) module has been newly connected (in which case the producer module's output t-time to this module is set to 0)). All transfers are timestamped. In event-driven mode, after every transfer, the streaming transfer timestamp for that connection is set to 0 so that subsequent hybrid scheduling runs will re-transfer all relevant data. If the module has been modified, or inputs have been transferred to it (in which case it is also explicitly modified), its execute_module() method is then called. Hybrid scheduling: This mode of scheduling has to be explicitly invoked by the user. All modules with a streaming_execute_module() are considered streamable. The largest subsets of streamable modules are found (see [1] for details on this algorithm). All modules are iterated through in topological order and execution continues as for event-driven scheduling, except when a streamable module is encountered. 
In that case, we use a different set of streaming_transfer_times to check whether we should transfer its producers' output data pointers (WITHOUT disconnect workaround). In every case that we do a transfer, the usual transfer timestamps are set to 0 so that any subsequent event-driven scheduling will re-transfer. For each re-transfer, the module will be modified, thus also causing a re-execute when we change to event-driven mode. Only if the current streamable module is at one of the end points of the streamable subset and its execute_timestamp is older than the normal modification time-stamp, is its streaming_execute_module() method called and the streaming_execute_timestamp touched. Timestamps: There are four collections of timestamps: 1. per module modified_time (initvalue 0) 2. per module execute_time (initvalue 0) 3. per output connection transfer_time 4. per module streaming touch time (initvalue 0) When a module's configuration is changed by the user (the user somehow interacts with the module), the module's modified_time is set to current_time. When a module execution is scheduled: * For each supplying connection, the data is transferred if transfer_time(connection) < execute_time(producer_module), or in the hybrid case, if transfer_time(connection) < touch_time(producer_module) * If data is transferred to a module, that module's modified_time is set to current_time. * The module is then executed if modified_time > execute_time. * If the module is executed, execute_time is set to current_time. Notes: * there are two sets of transfer_time timestamps, one set each for event-driven and hybrid * there is only ONE set of modified times and of execute_times * See the timestamp description above, as well as the descriptions for hybrid and event-driven to see how the scheduler makes sure that switching between execution models automatically results in re-execution of modules that are adaptively scheduled. * in the case that illegal cycles are found, network execution is aborted. [1] C.P. Botha and F.H. Post, "Hybrid Scheduling in the DeVIDE Dataflow Visualisation Environment", accepted for SimVis 2008 This should be a singleton, as we're using a mutex to protect per- process network execution. @author: Charl P. Botha <http://cpbotha.net/> """ _execute_mutex = mutex.mutex() def __init__(self, devideApp): """Initialise scheduler instance. @param devideApp: an instance of the devideApplication that we'll use to communicate with the outside world. """ self._devideApp = devideApp def meta_modules_to_scheduler_modules(self, meta_modules): """Preprocess module instance list before cycle detection or topological sorting to take care of exceptions. Note that the modules are wrapped anew by this method, so equality tests with previously existing scheduleModules will not work. You have to use the L{SchedulerModuleWrapper.matches()} method. @param module_instances: list of raw module instances @return: list with SchedulerModuleWrappers """ # replace every view module with two segments: final and initial SchedulerModuleWrappers = [] for mModule in meta_modules: # wrap every part separately for part in range(mModule.numParts): SchedulerModuleWrappers.append( SchedulerModuleWrapper(mModule, part)) return SchedulerModuleWrappers def getConsumerModules(self, schedulerModule): """Return consumers of schedulerModule as a list of schedulerModules. The consumers that are returned have been wrapped on an ad hoc basis, so you can't trust normal equality or 'in' tests. 
Use the L{SchedulerModuleWrapper.matches} method instead. @param schedulerModule: determine modules that are connected to outputs of this instance. @param part: Only return modules that are dependent on this part. @return: list of consumer schedulerModules, ad hoc wrappings. """ # get the producer meta module p_meta_module = schedulerModule.meta_module # only consumers that are dependent on p_part are relevant p_part = schedulerModule.part # consumers is a list of (output_idx, consumerMetaModule, # consumerInputIdx) tuples mm = self._devideApp.get_module_manager() consumers = mm.get_consumers(p_meta_module) sConsumers = [] for output_idx, consumerMetaModule, consumerInputIdx in consumers: if p_meta_module.getPartForOutput(output_idx) == p_part: # now see which part of the consumerMetaModule is dependent cPart = consumerMetaModule.getPartForInput(consumerInputIdx) sConsumers.append( SchedulerModuleWrapper(consumerMetaModule, cPart)) return sConsumers def getProducerModules(self, schedulerModule): """Return producer modules and indices that supply schedulerModule with data. The producers that are returned have been wrapped on an ad hoc basis, so you can't trust normal equality or 'in' tests. Use the L{SchedulerModuleWrapper.matches} method instead. @param schedulerModule: determine modules that are connected to inputs of this instance. @return: list of tuples with (producer schedulerModule, output index, consumer input index). """ # get the consumer meta module c_meta_module = schedulerModule.meta_module # only producers that supply this part are relevant c_part = schedulerModule.part # producers is a list of (producerMetaModule, output_idx, input_idx) # tuples mm = self._devideApp.get_module_manager() producers = mm.get_producers(c_meta_module) sProducers = [] for p_meta_module, outputIndex, consumerInputIdx in producers: if c_meta_module.getPartForInput(consumerInputIdx) == c_part: # find part of producer meta module that is actually # producing for schedulerModule p_part = p_meta_module.getPartForOutput(outputIndex) sProducers.append( (SchedulerModuleWrapper(p_meta_module, p_part), outputIndex, consumerInputIdx)) return sProducers def detectCycles(self, schedulerModules): """Given a list of moduleWrappers, detect cycles in the topology of the modules. @param schedulerModules: list of module instances that has to be checked. @return: True if cycles detected, False otherwise. @todo: check should really be limited to modules in selection. """ def detectCycleMatch(visited, currentModule): """Recursive function used to check for cycles in the module network starting from initial module currentModule. @param visited: list of schedulerModules used during recursion. 
@param currentModule: initial schedulerModule @return: True if cycle detected starting from currentModule """ consumers = self.getConsumerModules(currentModule) for consumer in consumers: for v in visited: if consumer.matches(v): return True else: # we need to make a copy of visited and send it along # if we don't, changes to visit are shared between # different branches of the recursion; we only want # it to aggregate per recursion branch visited_copy = {} visited_copy.update(visited) visited_copy[consumer] = 1 if detectCycleMatch(visited_copy, consumer): return True # the recursion ends when there are no consumers and return False for schedulerModule in schedulerModules: if detectCycleMatch({schedulerModule : 1}, schedulerModule): return True return False def topoSort(self, schedulerModules): """Perform topological sort on list of modules. Given a list of module instances, this will perform a topological sort that can be used to determine the execution order of the give modules. The modules are checked beforehand for cycles. If any cycles are found, an exception is raised. @param schedulerModules: list of module instance to be sorted @return: modules in topological order; in this case the instances DO match the input instances. @todo: separate topologically independent trees """ def isFinalVertex(schedulerModule, currentList): """Determines whether schedulerModule is a final vertex relative to the currentList. A final vertex is a vertex/module with no consumers in the currentList. @param schedulerModule: module whose finalness is determined @param currentList: list relative to which the finalness is determined. @return: True if final, False if not. """ # find consumers consumers = self.getConsumerModules(schedulerModule) # now check if any one of these consumers is present in currentList for consumer in consumers: for cm in currentList: if consumer.matches(cm): return False return True if self.detectCycles(schedulerModules): raise CyclesDetectedException( 'Cycles detected in network. Unable to schedule.') # keep on finding final vertices, move to final list scheduleList = [] # this will be the actual schedules list tempList = schedulerModules[:] # copy of list so we can futz around while tempList: finalVertices = [sm for sm in tempList if isFinalVertex(sm, tempList)] scheduleList.extend(finalVertices) for fv in finalVertices: tempList.remove(fv) scheduleList.reverse() return scheduleList def execute_modules(self, schedulerModules): """Execute the modules in schedulerModules in topological order. For each module, all output is transferred from its consumers and then it's executed. I'm still thinking about the implications of doing this the other way round, i.e. each module is executed and its output is transferred. Called by SchedulerProxy.execute_modules(). @param schedulerModules: list of modules that should be executed in order. @raise CyclesDetectedException: This exception is raised if any cycles are detected in the modules that have to be executed. @todo: add start_module parameter, execution skips all modules before this module in the topologically sorted execution list. """ # stop concurrent calls of execute_modules. if not Scheduler._execute_mutex.testandset(): return # first remove all blocked modules from the list, before we do any # kind of analysis. 
blocked_module_indices = [] for i in range(len(schedulerModules)): if schedulerModules[i].meta_module.blocked: blocked_module_indices.append(i) blocked_module_indices.reverse() for i in blocked_module_indices: del(schedulerModules[i]) # finally start with execution. try: if self.detectCycles(schedulerModules): raise CyclesDetectedException( 'Cycles detected in selected network modules. ' 'Unable to execute.') # this will also check for cycles... schedList = self.topoSort(schedulerModules) mm = self._devideApp.get_module_manager() for sm in schedList: print "### sched:", sm.meta_module.instance.__class__.__name__ # find all producer modules producers = self.getProducerModules(sm) # transfer relevant data for pmodule, output_index, input_index in producers: if mm.should_transfer_output( pmodule.meta_module, output_index, sm.meta_module, input_index): print 'transferring output: %s:%d to %s:%d' % \ (pmodule.meta_module.instance.__class__.__name__, output_index, sm.meta_module.instance.__class__.__name__, input_index) mm.transfer_output(pmodule.meta_module, output_index, sm.meta_module, input_index) # finally: execute module if # ModuleManager thinks it's necessary if mm.should_execute_module(sm.meta_module, sm.part): print 'executing part %d of %s' % \ (sm.part, sm.meta_module.instance.__class__.__name__) mm.execute_module(sm.meta_module, sm.part) finally: # in whichever way execution terminates, we have to unlock the # mutex. Scheduler._execute_mutex.unlock() ######################################################################### class EventDrivenScheduler(Scheduler): pass ######################################################################### class HybridScheduler(Scheduler): def execute_modules(self, schedulerModules): """Execute the modules in schedulerModules according to hybrid scheduling strategy. See documentation in Scheduler class and the paper [1] for a complete description. @param schedulerModules: list of modules that should be executed in order. @raise CyclesDetectedException: This exception is raised if any cycles are detected in the modules that have to be executed. @todo: add start_module parameter, execution skips all modules before this module in the topologically sorted execution list. """ # stop concurrent calls of execute_modules. if not Scheduler._execute_mutex.testandset(): return # first remove all blocked modules from the list, before we do any # kind of analysis. blocked_module_indices = [] for i in range(len(schedulerModules)): if schedulerModules[i].meta_module.blocked: blocked_module_indices.append(i) blocked_module_indices.reverse() for i in blocked_module_indices: del(schedulerModules[i]) # finally start with execution. try: if self.detectCycles(schedulerModules): raise CyclesDetectedException( 'Cycles detected in selected network modules. ' 'Unable to execute.') # this will also check for cycles... 
schedList = self.topoSort(schedulerModules) mm = self._devideApp.get_module_manager() # find largest streamable subsets streamables_dict, streamable_subsets = \ self.find_streamable_subsets(schedulerModules) for sm in schedList: smt = (sm.meta_module, sm.part) if smt in streamables_dict: streaming_module = True print "### streaming ", else: streaming_module = False print "### ", print "sched:", sm.meta_module.instance.__class__.__name__ # find all producer modules producers = self.getProducerModules(sm) # transfer relevant data for pmodule, output_index, input_index in producers: pmt = (pmodule.meta_module, pmodule.part) if streaming_module and pmt in streamables_dict: streaming_transfer = True else: streaming_transfer = False if mm.should_transfer_output( pmodule.meta_module, output_index, sm.meta_module, input_index, streaming_transfer): if streaming_transfer: print 'streaming ', print 'transferring output: %s:%d to %s:%d' % \ (pmodule.meta_module.instance.__class__.__name__, output_index, sm.meta_module.instance.__class__.__name__, input_index) mm.transfer_output(pmodule.meta_module, output_index, sm.meta_module, input_index, streaming_transfer) # finally: execute module if # ModuleManager thinks it's necessary if streaming_module: if streamables_dict[smt] == 2: # terminating module in streamable subset if mm.should_execute_module(sm.meta_module, sm.part): print 'streaming executing part %d of %s' % \ (sm.part, \ sm.meta_module.instance.__class__.__name__) mm.execute_module(sm.meta_module, sm.part, streaming=True) # if the module has been # streaming_executed, it has also been # touched. sm.meta_module.streaming_touch_timestamp_module(sm.part) # make sure we touch the module even if we don't # execute it. this is used in the transfer # caching elif sm.meta_module.should_touch(sm.part): sm.meta_module.streaming_touch_timestamp_module(sm.part) else: # this is not a streaming module, normal semantics if mm.should_execute_module(sm.meta_module, sm.part): print 'executing part %d of %s' % \ (sm.part, \ sm.meta_module.instance.__class__.__name__) mm.execute_module(sm.meta_module, sm.part) finally: # in whichever way execution terminates, we have to unlock the # mutex. Scheduler._execute_mutex.unlock() def find_streamable_subsets(self, scheduler_modules): """ Algorithm for finding streamable subsets in a network. Also see Algorithm 2 in the paper [1]. @param scheduler_modules: topologically sorted list of SchedulerModuleWrapper instances (S). @return: dictionary of streamable MetaModule bindings (V_ss) mapping to 1 (non-terminating) or 2 (terminating) and list of streamable subsets, each an array (M_ss). """ # get all streaming modules from S and keep topological # ordering (S_s == streaming_scheduler_modules) streamable_modules = [] streamable_modules_dict = {} for sm in scheduler_modules: if hasattr(sm.meta_module.instance, 'streaming_execute_module'): streamable_modules.append((sm.meta_module, sm.part)) # we want to use this to check for streamability later streamable_modules_dict[(sm.meta_module, sm.part)] = 1 # now the fun begins: streamables_dict = {} # this is V_ss streamable_subsets = [] # M_ss def handle_new_streamable(smt, streamable_subset): """Recursive method to do depth-first search for largest streamable subset. This is actually the infamous line 9 in the article. @param: smt is a streamable module tuple (meta_module, part) """ # get all consumers of sm # getConsumerModules returns ad hoc wrappings! 
sm = SchedulerModuleWrapper(smt[0], smt[1]) consumers = self.getConsumerModules(sm) # if there are no consumers, per def a terminating module if len(consumers) == 0: terminating = True else: # check if ANY of the the consumers is non-streamable # in which case sm is also terminating terminating = False for c in consumers: if (c.meta_module,c.part) not in \ streamable_modules_dict: terminating = True break if terminating: # set sm as the terminating module streamables_dict[smt] = 2 else: # add all consumers to streamable_subset M ctuples = [(i.meta_module, i.part) for i in consumers] streamable_subset.append(ctuples) # also add them all to V_ss streamables_dict.fromkeys(ctuples, 1) for c in consumers: handle_new_streamable((c.meta_module, c.part), streamable_subset) # smt is a streamable module tuple (meta_module, part) for smt in streamable_modules: if not smt in streamables_dict: # this is a NEW streamable module! # create new streamable subset streamable_subset = [smt] streamables_dict[smt] = 1 # handle this new streamable handle_new_streamable(smt, streamable_subset) # handle_new_streamable recursion is done, add # this subset list of subsets streamable_subsets.append(streamable_subset) return streamables_dict, streamable_subsets ######################################################################### class SchedulerProxy: """Proxy class for all schedulers. Each scheduler mode is represented by a different class, but we want to use a common instance to access functionality, hence this proxy. """ EVENT_DRIVEN_MODE = 0 HYBRID_MODE = 1 def __init__(self, devide_app): self.event_driven_scheduler = EventDrivenScheduler(devide_app) self.hybrid_scheduler = HybridScheduler(devide_app) # default mode self.mode = SchedulerProxy.EVENT_DRIVEN_MODE def get_scheduler(self): """Return the correct scheduler instance, dependent on the current mode. """ s = [self.event_driven_scheduler, self.hybrid_scheduler][self.mode] return s def execute_modules(self, scheduler_modules): """Thunks through to the correct scheduler instance's execute_modules. This is called by NetworkManager.execute_network() """ self.get_scheduler().execute_modules(scheduler_modules) def meta_modules_to_scheduler_modules(self, meta_modules): return self.get_scheduler().meta_modules_to_scheduler_modules(meta_modules)
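# --- Minimal sketch of the timestamp rules from the Scheduler docstring ---
# The event-driven rules above reduce to: (1) transfer a producer's output when
# the producer has executed since the last transfer, (2) a transfer marks the
# consumer as modified, and (3) a module executes only when modified_time is
# newer than execute_time. The toy module class and integer clock below are
# illustrative only; the real scheduler works on MetaModule/part wrappers via
# the DeVIDE ModuleManager.
import itertools

_clock = itertools.count(1)

class ToyModule(object):
    def __init__(self, name):
        self.name = name
        self.modified_time = 0
        self.execute_time = 0
        self.transfer_times = {}   # producer -> time of the last transfer

    def touch(self):
        self.modified_time = next(_clock)

def toy_schedule(chain):
    """Run one event-driven pass over a linear producer->consumer chain."""
    for i, module in enumerate(chain):
        for producer in chain[:i]:
            # rule 1: transfer if the producer executed after the last transfer
            if module.transfer_times.get(producer, 0) < producer.execute_time:
                module.transfer_times[producer] = next(_clock)
                module.touch()   # rule 2: receiving data marks the module modified
        if module.modified_time > module.execute_time:   # rule 3
            print('executing ' + module.name)
            module.execute_time = next(_clock)

if __name__ == '__main__':
    reader, viewer = ToyModule('reader'), ToyModule('viewer')
    reader.touch()                  # the user changed the reader's configuration
    toy_schedule([reader, viewer])  # executes reader, transfers, executes viewer
    toy_schedule([reader, viewer])  # nothing changed, so nothing re-executes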
import geopandas as gpd import pandas import numpy import os import matplotlib import matplotlib.pyplot as plt import seaborn as sns local_palattes = { 'radar': ['deep purple', 'deep green', 'green', 'green', 'yellow', 'orange',] } def color_palette_alpha(palatte='Reds', low_alpha=0.0, high_alpha=1.0): if palatte in local_palattes: return color_list_alpha(local_palattes[palatte], low_alpha=low_alpha, high_alpha=high_alpha, name=palatte) c = sns.color_palette(palatte) from matplotlib.colors import LinearSegmentedColormap return LinearSegmentedColormap.from_list( name=palatte+"_gen", colors=[ (*i, j) for i,j in zip( c, numpy.linspace(low_alpha, high_alpha, len(c)) ) ] ) def color_list_alpha(colorlist=(), low_alpha=0.0, high_alpha=1.0, name="_gen"): if colorlist==(): colorlist = ['deep purple', 'deep green', 'green', 'green', 'yellow', 'orange',] c = [sns.colors.xkcd_rgb[i] for i in colorlist] from matplotlib.colors import LinearSegmentedColormap return LinearSegmentedColormap.from_list( name=name, colors=[ (int(i[1:3],16)/256, int(i[3:5],16)/256, int(i[5:7],16)/256, j) for i,j in zip( c, numpy.linspace(low_alpha, high_alpha, len(c)) ) ] ) local_cmaps = { k: color_list_alpha(local_palattes[k], 0.0,1.0,k) for k in local_palattes } from geopandas import read_file # convenience class Map: def __init__(self, center=(None,None), extent=(None,None), xlim=None, ylim=None, xticks=None, yticks=None, title=None, bgcolor=None, height=None, width=None, frame=None): fig, ax = plt.subplots() ax.set_aspect('equal') if center[0] and extent[0]: ax.set_xlim(center[0]-extent[0], center[0]+extent[0]) elif xlim is not None: ax.set_xlim(*xlim) if center[1] and extent[1]: ax.set_ylim(center[1]-extent[1], center[1]+extent[1]) elif ylim is not None: ax.set_ylim(*ylim) if xticks is None or xticks is False: ax.set_xticks([]) elif xticks is not True: ax.set_xticks(xticks) if yticks is None or yticks is False: ax.set_yticks([]) elif yticks is not True: ax.set_yticks(yticks) if bgcolor is not None: fig.patch.set_facecolor(bgcolor) self.ax = ax self.fig = fig self.set_title(title) if height is not None: self.fig.set_figheight(height) if width is not None: self.fig.set_figwidth(width) if frame is not None: self.ax.set_frame_on(frame) def set_title(self, title): self._title = title if title is not None: self.ax.set_title(title) return self def __repr__(self): if self._title: return f"<pines.geoviz.Map: {self._title}>" else: return f"<pines.geoviz.Map: Untitled>" def get_png(self, *args, close_after=True, **kwargs): import io buf = io.BytesIO() kwargs.pop('format', None) bbox_inches = kwargs.pop('bbox_inches', 'tight') self.fig.savefig(buf, format='png', bbox_inches=bbox_inches, *args, **kwargs) if close_after: plt.close(self.fig.number) return buf.getvalue() def choropleth( self, gdf:gpd.GeoDataFrame, column, cmap=None, legend=True, vmin=None, vmax=None, labels=None, colorbar_fraction = 0.046, colorbar_pad = 0.04, colorbar_shrink = 0.75, **kwargs, ): if legend == 'manual': manual_legend = True legend = False else: manual_legend = False y = gdf.plot( ax=self.ax, column=column, cmap=cmap, legend=legend, vmin=vmin, vmax=vmax, **kwargs ) if manual_legend: mn = gdf[column].min() if vmin is None else vmin mx = gdf[column].max() if vmax is None else vmax from matplotlib.colors import Normalize from matplotlib import cm norm = Normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) n_cmap.set_array([]) self.fig.colorbar(n_cmap, fraction=colorbar_fraction, pad=colorbar_pad, shrink=colorbar_shrink) if labels is 
not None: from seaborn.utils import relative_luminance areacolors = y.collections[0].get_facecolors() label_col = labels.pop('column') formatter = labels.pop('formatter', lambda x: x) for r in range(len(gdf)): self.ax.annotate( s=str(formatter(gdf.iloc[r][label_col])), xy=gdf.iloc[r].geometry.representative_point().coords[0], ha='center', va='center', clip_on=True, color=".15" if relative_luminance(areacolors[r]) > .408 else "w", **labels ) return self def invalid_area(self, gdf, color='#000000AA', **kwargs): gdf.plot( ax=self.ax, color=color, **kwargs ) return self def borderlines(self, gdf, edgecolor="#000000FF", weight=1, **kwargs): gdf.plot( ax=self.ax, color="#FFFFFF00", # transparent fill color edgecolor=edgecolor, linewidth=weight, **kwargs ) return self def labels(self, gdf, column, formatter=lambda x:x, **kwargs): if column not in gdf.columns: raise KeyError(f'column "{column}" not in gdf.columns') gdf.apply( lambda x: self.ax.annotate( s=str(formatter(x[column])), xy=x.geometry.centroid.coords[0], ha='center', va='center', clip_on=True, **kwargs ), axis=1 ) return self def kdeplot(self, lat, lon, gridsize=100, bw=.01, legend=False, cmap=None, palatte="Reds", clist=None, **kwargs): if cmap is None: if clist is None: cmap = color_palette_alpha(palatte) else: cmap = color_list_alpha(clist) import seaborn as sns sns.kdeplot( lon, lat, clip=( self.ax.get_xlim(), self.ax.get_ylim(), ), ax=self.ax, shade=True, gridsize=gridsize, bw=bw, shade_lowest=False, cmap=cmap, # LinearSegmentedColormap.from_list('name', [(1, 0, 0, 0), (1, 0, 0, 1/3), (1, 0, 0, 2/3), (0, 1, 0, 1)]), legend=legend, **kwargs ) return self def wkdeplot(self, lat, lon, wgt, gridsize=100, bw=.01, legend=False, palatte="Reds", cmap=None, clist=None, **kwargs): from sklearn.neighbors import KernelDensity if cmap is None: if clist is None: cmap = color_palette_alpha(palatte) else: cmap = color_list_alpha(clist) Xtrain = numpy.vstack([ lat, lon] ).T X, Y = numpy.meshgrid( numpy.linspace(*self.ax.get_xlim(), gridsize), numpy.linspace(*self.ax.get_ylim(), gridsize), ) xy = numpy.vstack([Y.ravel(), X.ravel()]).T kde = KernelDensity(bandwidth=bw, #metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain, sample_weight=wgt) Z = kde.score_samples(xy) Z = Z.reshape(X.shape) levels = numpy.linspace(0, Z.max(), 25) if cmap is None: cmap= color_palette_alpha(palatte) self.ax.contourf( X, Y, Z, levels=levels, cmap=cmap, ) return self def points(self, gdf, color='#BB0000', plotnumber=0, **kwargs): # if self.grid_width == 1 and self.grid_height == 1: # ax = self.axes # else: # ax = self.axes.ravel()[plotnumber] gdf.plot( ax=self.ax, color=color, **kwargs ) return self def colored_points(self, gdf, column, cmap='vidiris', plotnumber=0, **kwargs): # if self.grid_width == 1 and self.grid_height == 1: # ax = self.axes # else: # ax = self.axes.ravel()[plotnumber] gdf.plot( ax=self.ax, column=column, cmap=cmap, **kwargs ) return self class MapMaker: def __init__(self, *args, **kwargs): self._args = args self._kwargs = kwargs def __call__(self, **kwargs): return Map(*self._args, **self._kwargs, **kwargs) def reduce_coordinate_precision_of_shapefile(in_filename, *out_filename, **kwargs): from shapely.geometry import shape, mapping gdf = gpd.read_file(in_filename) for xx in gdf.index: geojson = mapping(gdf.geometry[xx]) geojson['coordinates'] = numpy.round(numpy.array(geojson['coordinates']), 6) gdf.loc[xx, 'geometry'] = shape(geojson) gdf.to_file(*out_filename, **kwargs) def get_distance_matrix(gdf, id_col, filename=None): 
if filename is not None and os.path.exists(filename): return pandas.read_pickle(filename) distance_matrix = pandas.DataFrame( data=0, index=gdf[id_col], columns=gdf[id_col], dtype=numpy.float64, ) for i in range(len(gdf)): distance_matrix.values[:,i] = gdf.centroid.distance(gdf.centroid[i]) distance_matrix = distance_matrix.sort_index(axis=0).sort_index(axis=1) if filename is not None and not os.path.exists(filename): os.makedirs(os.path.dirname(filename) or '.', exist_ok=True) distance_matrix.to_pickle(filename) return distance_matrix
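# --- Hedged usage sketch for the Map helper defined above ---
# Assumes this module is importable (or the snippet is appended to it); the
# shapefile path and the 'population' / 'name' columns are hypothetical.
import geopandas as gpd

def demo_map(shapefile='zones.shp'):
    gdf = gpd.read_file(shapefile)
    m = Map(title='Population by zone', height=6, width=8, frame=False)
    m.choropleth(gdf, column='population', cmap='Reds', legend='manual')
    m.borderlines(gdf, edgecolor='#333333FF', weight=0.5)
    m.labels(gdf, column='name', fontsize=7)
    return m.get_png()   # rendered PNG bytes, figure closed afterwards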
from __future__ import print_function from ctypes import py_object class Array(): """ ctypes array of fixed size """ def __init__(self, *args, **kwargs): if 'size' in kwargs: self.size = kwargs['size'] else: self.size = len(args) if len(args) > self.size: raise OverflowError('Array size is too small') self.array = (self.size * py_object)() for i in range(len(args)): self.array[i] = args[i] for i in range(len(args), self.size): self.array[i] = None def __getitem__(self, index): if index < 0 or index >= self.size: raise IndexError('Array index out of bounds') return self.array[index] def __setitem__(self, index, value): if index < 0 or index >= self.size: raise IndexError('Array index out of bounds') self.array[index] = value class BTreeKey(): """ b tree key that stores word and count """ def __init__(self, word): self.word = word self.count = 0 def __lt__(self, word): return self.word < word def __le__(self, word): return self.word <= word def __eq__(self, word): return self.word == word def __ne__(self, word): return self.word != word def __gt__(self, word): return self.word > word def __ge__(self, word): return self.word >= word class BTreeNode(): """ b tree node """ def __init__(self, t, leaf): self.t = t self.leaf = leaf self.num_keys = 0 self.keys = Array(size=2 * t - 1) self.children = Array(size=2 * t) def is_full(self): """ return if node is full """ return self.num_keys == 2 * self.t - 1 def insert_non_full(self, key): """ insert key into non full node """ index = self.num_keys - 1 if self.leaf: # insert into node using bubble sort while index >= 0 and self.keys[index] > key: self.keys[index + 1] = self.keys[index] index -= 1 # prevent duplicate keys if index >= 0 and self.keys[index] == key: return self.keys[index + 1] = BTreeKey(key) self.num_keys += 1 else: # find subtree key should be inserted while index >= 0 and self.keys[index] > key: index -= 1 if self.children[index + 1].is_full(): self.split_child(index + 1, self.children[index + 1]) if self.keys[index + 1] < key: index += 1 # recursively insert into subtree self.children[index + 1].insert_non_full(key) def split_child(self, index, child): """ split a full child node into two children and transfer one key to parent """ new = BTreeNode(child.t, child.leaf) new.num_keys = self.t - 1 # copy right half keys to new node for i in range(self.t - 1): new.keys[i] = child.keys[i + self.t] # copy right half children to new node if not child.leaf: for i in range(self.t): new.children[i] = child.children[i + self.t] child.num_keys = self.t - 1 # make room for new child node for i in range(self.num_keys, index, -1): self.children[i + 1] = self.children[i] self.children[index + 1] = new # promote key from child to parent (this node) for i in range(self.num_keys - 1, index - 1, -1): self.keys[i + 1] = self.keys[i] self.keys[index] = child.keys[self.t - 1] self.num_keys += 1 def remove(self, key): index = 0 while index < self.num_keys and self.keys[index] < key: index += 1 # if key is in this node if index < self.num_keys and self.keys[index] == key: if self.leaf: for i in range(index + 1, self.num_keys): self.keys[i - 1] = self.keys[i] self.num_keys -= 1 else: key = self.keys[index] # promote max of left tree to left child if self.children[index].num_keys >= self.t: current = self.children[index] while not current.leaf: current = current.children[current.num_keys] before = current.keys[current.num_keys - 1] self.keys[index] = before self.children[index].remove(before) # promote min of left tree to right child elif self.children[index + 
1].num_keys >= self.t: current = self.children[index + 1] while not current.leaf: current = current.children[0] after = current.keys[0] self.keys[index] = after self.children[index + 1].remove(after) # if both children don't have enough keys, combine else: self.merge(index) self.children[index].remove(key) else: if self.leaf: # key is not in tree return if index == self.num_keys: last = True else: last = False # not enough keys in child if self.children[index].num_keys < self.t: # steal key from left child if index != 0 and self.children[index - 1].num_keys >= self.t: child = self.children[index] sibling = self.children[index - 1] # make room in parent for key for i in range(child.num_keys - 1, -1, -1): child.keys[i + 1] = child.keys[i] if not child.leaf: for i in range(child.num_keys, -1, -1): child.children[i + 1] = child.children[i] # rotate keys and children child.keys[0] = self.keys[index - 1] if not self.leaf: child.children[0] = sibling.children[sibling.num_keys] self.keys[index - 1] = sibling.keys[sibling.num_keys - 1] child.num_keys += 1 sibling.num_keys -= 1 # steal key from right child elif index != self.num_keys and self.children[index + 1].num_keys >= self.t: child = self.children[index] sibling = self.children[index + 1] child.keys[child.num_keys] = self.keys[index] if not child.leaf: child.children[child.num_keys + 1] = sibling.children[0] self.keys[index] = sibling.keys[0] # move keys back for i in range(1, sibling.num_keys): sibling.keys[i - 1] = sibling.keys[i] # move children back if not sibling.leaf: for i in range(1, sibling.num_keys + 1): sibling.children[i - 1] = sibling.children[i] child.num_keys += 1 sibling.num_keys -= 1 # stealing won't work, merge together else: if index != self.num_keys: self.merge(index) else: self.merge(index - 1) # balance by recursively removing if last and index > self.num_keys: self.children[index - 1].remove(key) else: self.children[index].remove(key) def merge(self, index): child = self.children[index] sibling = self.children[index + 1] # demote key from parent (this node) child.keys[self.t - 1] = self.keys[index] # copy keys from sibling to child for i in range(sibling.num_keys): child.keys[i + self.t] = sibling.keys[i] # copy children from sibling to child if not child.leaf: for i in range(sibling.num_keys + 1): child.children[i + self.t] = sibling.children[i] # fill key space created by demotion for i in range(index + 1, self.num_keys): self.keys[i - 1] = self.keys[i] # unlink sibling from tree for i in range(index + 2, self.num_keys + 1): self.children[i - 1] = self.children[i] child.num_keys += sibling.num_keys + 1 self.num_keys -= 1 def search(self, key): index = 0 while index < self.num_keys and key > self.keys[index]: index += 1 if self.keys[index] == key: self.keys[index].count += 1 return self.keys[index] if self.leaf: return None return self.children[index].search(key) def traverse(self): """ print tree to be visualized using http://mshang.ca/syntree/ """ print('[', end='') # print keys keys = [] for i in range(self.num_keys): keys.append(self.keys[i].word) print(','.join(keys), end='') # print children if not self.leaf: for i in range(self.num_keys + 1): self.children[i].traverse() print(']', end='') def find_max(self): """ find key with maximum word count """ if self.leaf: # return leaf node key with max word count index = 0 for i in range(self.num_keys - 1): if self.keys[i + 1].count > self.keys[i].count: index = i + 1 return self.keys[index] else: # get max count of each child child_maxes = Array(size=self.num_keys + 1) 
for i in range(self.num_keys + 1): child_maxes[i] = self.children[i].find_max() # get index of child with max count child_index = 0 for i in range(self.num_keys): if child_maxes[i + 1].count > child_maxes[i].count: child_index = i + 1 # get max key at this node index = 0 for i in range(self.num_keys - 1): if self.keys[i + 1].count > self.keys[i].count: index = i + 1 if self.keys[index].count > child_maxes[child_index].count: return self.keys[index] else: return child_maxes[child_index] class BTree(): """ an M order b tree """ def __init__(self, m): self.root = None self.size = 0 self.t = m // 2 # minimum degree must be an integer def insert(self, key): """ insert key into tree """ # tree is empty if not self.root: self.root = BTreeNode(self.t, True) self.root.keys[0] = BTreeKey(key) self.root.num_keys = 1 else: # create a new root if self.root.is_full(): new = BTreeNode(self.t, False) new.children[0] = self.root new.split_child(0, self.root) index = 0 if new.keys[0] < key: index += 1 new.children[index].insert_non_full(key) self.root = new else: self.root.insert_non_full(key) self.size += 1 def remove(self, key): """ remove key from tree """ # tree is empty if not self.root: return self.root.remove(key) # remove root if empty if self.root.num_keys == 0: if self.root.leaf: self.root = None else: self.root = self.root.children[0] def search(self, key): if self.root: return self.root.search(key) return None def traverse(self): if self.root: self.root.traverse() print('') else: print('[]') def find_max(self): """ find key with maximum word count """ if self.root: return self.root.find_max() return None
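# --- Small usage sketch for the word-counting BTree above ---
# The sample sentence is illustrative; with m=6 the minimum degree is t=3.
# insert() adds each distinct word once, search() bumps a key's count on every
# hit, and find_max() then reports the most frequently searched word.
def demo_btree():
    text = 'the quick brown fox jumps over the lazy dog the fox'
    tree = BTree(6)
    for word in text.split():
        tree.insert(word)     # duplicate words are not inserted twice
    for word in text.split():
        tree.search(word)     # each successful search increments the count
    tree.traverse()           # bracket form for http://mshang.ca/syntree/
    top = tree.find_max()
    print('%s: %d' % (top.word, top.count))   # 'the' was searched 3 times

if __name__ == '__main__':
    demo_btree()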
"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py """ import functools import warnings __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', 'isreal', 'nan_to_num', 'real', 'real_if_close', 'typename', 'asfarray', 'mintypecode', 'common_type'] import numpy.core.numeric as _nx from numpy.core.numeric import asarray, asanyarray, isnan, zeros from numpy.core.overrides import set_module from numpy.core import overrides from .ufunclike import isneginf, isposinf array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') _typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' @set_module('numpy') def mintypecode(typechars, typeset='GDFgdf', default='d'): """ Return the character for the minimum-size type to which given types can be safely cast. The returned type character must represent the smallest size dtype such that an array of the returned type can handle the data from an array of all types in `typechars` (or if `typechars` is an array, then its dtype.char). Parameters ---------- typechars : list of str or array_like If a list of strings, each string should represent a dtype. If array_like, the character representation of the array dtype is used. typeset : str or list of str, optional The set of characters that the returned character is chosen from. The default set is 'GDFgdf'. default : str, optional The default character, this is returned if none of the characters in `typechars` matches a character in `typeset`. Returns ------- typechar : str The character representing the minimum-size type that was found. See Also -------- dtype, sctype2char, maximum_sctype Examples -------- >>> np.mintypecode(['d', 'f', 'S']) 'd' >>> x = np.array([1.1, 2-3.j]) >>> np.mintypecode(x) 'D' >>> np.mintypecode('abceh', default='G') 'G' """ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char for t in typechars) intersection = set(t for t in typecodes if t in typeset) if not intersection: return default if 'F' in intersection and 'd' in intersection: return 'D' return min(intersection, key=_typecodes_by_elsize.index) def _asfarray_dispatcher(a, dtype=None): return (a,) @array_function_dispatch(_asfarray_dispatcher) def asfarray(a, dtype=_nx.float_): """ Return an array converted to a float type. Parameters ---------- a : array_like The input array. dtype : str or dtype object, optional Float type code to coerce input array `a`. If `dtype` is one of the 'int' dtypes, it is replaced with float64. Returns ------- out : ndarray The input `a` as a float ndarray. Examples -------- >>> np.asfarray([2, 3]) array([2., 3.]) >>> np.asfarray([2, 3], dtype='float') array([2., 3.]) >>> np.asfarray([2, 3], dtype='int8') array([2., 3.]) """ if not _nx.issubdtype(dtype, _nx.inexact): dtype = _nx.float_ return asarray(a, dtype=dtype) def _real_dispatcher(val): return (val,) @array_function_dispatch(_real_dispatcher) def real(val): """ Return the real part of the complex argument. Parameters ---------- val : array_like Input array. Returns ------- out : ndarray or scalar The real component of the complex argument. If `val` is real, the type of `val` is used for the output. If `val` has complex elements, the returned type is float. 
See Also -------- real_if_close, imag, angle Examples -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real array([1., 3., 5.]) >>> a.real = 9 >>> a array([9.+2.j, 9.+4.j, 9.+6.j]) >>> a.real = np.array([9, 8, 7]) >>> a array([9.+2.j, 8.+4.j, 7.+6.j]) >>> np.real(1 + 1j) 1.0 """ try: return val.real except AttributeError: return asanyarray(val).real def _imag_dispatcher(val): return (val,) @array_function_dispatch(_imag_dispatcher) def imag(val): """ Return the imaginary part of the complex argument. Parameters ---------- val : array_like Input array. Returns ------- out : ndarray or scalar The imaginary component of the complex argument. If `val` is real, the type of `val` is used for the output. If `val` has complex elements, the returned type is float. See Also -------- real, angle, real_if_close Examples -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag array([2., 4., 6.]) >>> a.imag = np.array([8, 10, 12]) >>> a array([1. +8.j, 3.+10.j, 5.+12.j]) >>> np.imag(1 + 1j) 1.0 """ try: return val.imag except AttributeError: return asanyarray(val).imag def _is_type_dispatcher(x): return (x,) @array_function_dispatch(_is_type_dispatcher) def iscomplex(x): """ Returns a bool array, where True if input element is complex. What is tested is whether the input has a non-zero imaginary part, not if the input type is complex. Parameters ---------- x : array_like Input array. Returns ------- out : ndarray of bools Output array. See Also -------- isreal iscomplexobj : Return True if x is a complex type or an array of complex numbers. Examples -------- >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([ True, False, False, False, False, True]) """ ax = asanyarray(x) if issubclass(ax.dtype.type, _nx.complexfloating): return ax.imag != 0 res = zeros(ax.shape, bool) return res[()] # convert to scalar if needed @array_function_dispatch(_is_type_dispatcher) def isreal(x): """ Returns a bool array, where True if input element is real. If element has complex type with zero complex part, the return value for that element is True. Parameters ---------- x : array_like Input array. Returns ------- out : ndarray, bool Boolean array of same shape as `x`. Notes ----- `isreal` may behave unexpectedly for string or object arrays (see examples) See Also -------- iscomplex isrealobj : Return True if x is not a complex type. Examples -------- >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) >>> np.isreal(a) array([False, True, True, True, True, False]) The function does not work on string arrays. >>> a = np.array([2j, "a"], dtype="U") >>> np.isreal(a) # Warns about non-elementwise comparison False Returns True for all elements in input array of ``dtype=object`` even if any of the elements is complex. >>> a = np.array([1, "2", 3+4j], dtype=object) >>> np.isreal(a) array([ True, True, True]) isreal should not be used with object arrays >>> a = np.array([1+2j, 2+1j], dtype=object) >>> np.isreal(a) array([ True, True]) """ return imag(x) == 0 @array_function_dispatch(_is_type_dispatcher) def iscomplexobj(x): """ Check for a complex type or an array of complex numbers. The type of the input is checked, not the value. Even if the input has an imaginary part equal to zero, `iscomplexobj` evaluates to True. Parameters ---------- x : any The input can be of any type and shape. Returns ------- iscomplexobj : bool The return value, True if `x` is of a complex type or has at least one complex element. 
See Also -------- isrealobj, iscomplex Examples -------- >>> np.iscomplexobj(1) False >>> np.iscomplexobj(1+0j) True >>> np.iscomplexobj([3, 1+0j, True]) True """ try: dtype = x.dtype type_ = dtype.type except AttributeError: type_ = asarray(x).dtype.type return issubclass(type_, _nx.complexfloating) @array_function_dispatch(_is_type_dispatcher) def isrealobj(x): """ Return True if x is not a complex type nor an array of complex numbers. The type of the input is checked, not the value. So even if the input has an imaginary part equal to zero, `isrealobj` evaluates to False if the data type is complex. Parameters ---------- x : any The input can be of any type and shape. Returns ------- y : bool The return value, False if `x` is of a complex type. See Also -------- iscomplexobj, isreal Notes ----- The function is only meant for arrays with numerical values but it accepts all other objects. Since it assumes array input, the return value of other objects may be True. >>> np.isrealobj('A string') True >>> np.isrealobj(False) True >>> np.isrealobj(None) True Examples -------- >>> np.isrealobj(1) True >>> np.isrealobj(1+0j) False >>> np.isrealobj([3, 1+0j, True]) False """ return not iscomplexobj(x) #----------------------------------------------------------------------------- def _getmaxmin(t): from numpy.core import getlimits f = getlimits.finfo(t) return f.max, f.min def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): return (x,) @array_function_dispatch(_nan_to_num_dispatcher) def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): """ Replace NaN with zero and infinity with large finite numbers (default behaviour) or with the numbers defined by the user using the `nan`, `posinf` and/or `neginf` keywords. If `x` is inexact, NaN is replaced by zero or by the user defined value in `nan` keyword, infinity is replaced by the largest finite floating point values representable by ``x.dtype`` or by the user defined value in `posinf` keyword and -infinity is replaced by the most negative finite floating point values representable by ``x.dtype`` or by the user defined value in `neginf` keyword. For complex dtypes, the above is applied to each of the real and imaginary components of `x` separately. If `x` is not inexact, then no replacements are made. Parameters ---------- x : scalar or array_like Input data. copy : bool, optional Whether to create a copy of `x` (True) or to replace values in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. .. versionadded:: 1.13 nan : int, float, optional Value to be used to fill NaN values. If no value is passed then NaN values will be replaced with 0.0. .. versionadded:: 1.17 posinf : int, float, optional Value to be used to fill positive infinity values. If no value is passed then positive infinity values will be replaced with a very large number. .. versionadded:: 1.17 neginf : int, float, optional Value to be used to fill negative infinity values. If no value is passed then negative infinity values will be replaced with a very small (or negative) number. .. versionadded:: 1.17 Returns ------- out : ndarray `x`, with the non-finite values replaced. If `copy` is False, this may be `x` itself. See Also -------- isinf : Shows which elements are positive or negative infinity. isneginf : Shows which elements are negative infinity. isposinf : Shows which elements are positive infinity. isnan : Shows which elements are Not a Number (NaN). 
isfinite : Shows which elements are finite (not NaN, not infinity) Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Examples -------- >>> np.nan_to_num(np.inf) 1.7976931348623157e+308 >>> np.nan_to_num(-np.inf) -1.7976931348623157e+308 >>> np.nan_to_num(np.nan) 0.0 >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) >>> np.nan_to_num(x) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) >>> np.nan_to_num(y) array([ 1.79769313e+308 +0.00000000e+000j, # may vary 0.00000000e+000 +0.00000000e+000j, 0.00000000e+000 +1.79769313e+308j]) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type isscalar = (x.ndim == 0) if not issubclass(xtype, _nx.inexact): return x[()] if isscalar else x iscomplex = issubclass(xtype, _nx.complexfloating) dest = (x.real, x.imag) if iscomplex else (x,) maxf, minf = _getmaxmin(x.real.dtype) if posinf is not None: maxf = posinf if neginf is not None: minf = neginf for d in dest: idx_nan = isnan(d) idx_posinf = isposinf(d) idx_neginf = isneginf(d) _nx.copyto(d, nan, where=idx_nan) _nx.copyto(d, maxf, where=idx_posinf) _nx.copyto(d, minf, where=idx_neginf) return x[()] if isscalar else x #----------------------------------------------------------------------------- def _real_if_close_dispatcher(a, tol=None): return (a,) @array_function_dispatch(_real_if_close_dispatcher) def real_if_close(a, tol=100): """ If input is complex with all imaginary parts close to zero, return real parts. "Close to zero" is defined as `tol` * (machine epsilon of the type for `a`). Parameters ---------- a : array_like Input array. tol : float Tolerance in machine epsilons for the complex part of the elements in the array. Returns ------- out : ndarray If `a` is real, the type of `a` is used for the output. If `a` has complex elements, the returned type is float. See Also -------- real, imag, angle Notes ----- Machine epsilon varies from machine to machine and between data types but Python floats on most platforms have a machine epsilon equal to 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print out the machine epsilon for floats. 
Examples -------- >>> np.finfo(float).eps 2.2204460492503131e-16 # may vary >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) array([2.1, 5.2]) >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) array([2.1+4.e-13j, 5.2 + 3e-15j]) """ a = asanyarray(a) if not issubclass(a.dtype.type, _nx.complexfloating): return a if tol > 1: from numpy.core import getlimits f = getlimits.finfo(a.dtype.type) tol = f.eps * tol if _nx.all(_nx.absolute(a.imag) < tol): a = a.real return a #----------------------------------------------------------------------------- _namefromtype = {'S1': 'character', '?': 'bool', 'b': 'signed char', 'B': 'unsigned char', 'h': 'short', 'H': 'unsigned short', 'i': 'integer', 'I': 'unsigned integer', 'l': 'long integer', 'L': 'unsigned long integer', 'q': 'long long integer', 'Q': 'unsigned long long integer', 'f': 'single precision', 'd': 'double precision', 'g': 'long precision', 'F': 'complex single precision', 'D': 'complex double precision', 'G': 'complex long double precision', 'S': 'string', 'U': 'unicode', 'V': 'void', 'O': 'object' } @set_module('numpy') def typename(char): """ Return a description for the given data type code. Parameters ---------- char : str Data type code. Returns ------- out : str Description of the input data type code. See Also -------- dtype, typecodes Examples -------- >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: ... print(typechar, ' : ', np.typename(typechar)) ... S1 : character ? : bool B : unsigned char D : complex double precision G : complex long double precision F : complex single precision I : unsigned integer H : unsigned short L : unsigned long integer O : object Q : unsigned long long integer S : string U : unicode V : void b : signed char d : double precision g : long precision f : single precision i : integer h : short l : long integer q : long long integer """ return _namefromtype[char] #----------------------------------------------------------------------------- #determine the "minimum common type" for a group of arrays. array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble], [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]] array_precision = {_nx.half: 0, _nx.single: 1, _nx.double: 2, _nx.longdouble: 3, _nx.csingle: 1, _nx.cdouble: 2, _nx.clongdouble: 3} def _common_type_dispatcher(*arrays): return arrays @array_function_dispatch(_common_type_dispatcher) def common_type(*arrays): """ Return a scalar type which is common to the input arrays. The return type will always be an inexact (i.e. floating point) scalar type, even if all the arrays are integer arrays. If one of the inputs is an integer array, the minimum precision type that is returned is a 64-bit floating point dtype. All input arrays except int64 and uint64 can be safely cast to the returned dtype without loss of information. Parameters ---------- array1, array2, ... : ndarrays Input arrays. Returns ------- out : data type code Data type code. 
See Also -------- dtype, mintypecode Examples -------- >>> np.common_type(np.arange(2, dtype=np.float32)) <class 'numpy.float32'> >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) <class 'numpy.float64'> >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) <class 'numpy.complex128'> """ is_complex = False precision = 0 for a in arrays: t = a.dtype.type if iscomplexobj(a): is_complex = True if issubclass(t, _nx.integer): p = 2 # array_precision[_nx.double] else: p = array_precision.get(t, None) if p is None: raise TypeError("can't get common type for non-numeric array") precision = max(precision, p) if is_complex: return array_type[1][precision] else: return array_type[0][precision]
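# --- Editor's sketch: sanity checks for the helpers above -------------------
# A small, self-contained demonstration of mintypecode, common_type and
# nan_to_num, runnable against an installed NumPy (>= 1.17 for the nan/posinf/
# neginf keywords), where these helpers are exposed as np.mintypecode etc.
import numpy as np

# Smallest safe typecode: float32 + float64 -> float64 ('d').
assert np.mintypecode('fd') == 'd'
# Mixing real and complex singles stays complex ('F').
assert np.mintypecode('fF') == 'F'
# Integer input promotes to at least 64-bit floats.
assert np.common_type(np.arange(3), np.array([1.5])) is np.float64
# Non-finite values are replaced elementwise.
x = np.array([np.nan, np.inf, -np.inf, 1.0])
y = np.nan_to_num(x, nan=0.0, posinf=1e9, neginf=-1e9)
assert y.tolist() == [0.0, 1e9, -1e9, 1.0]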
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import webob import webob.dec import nova.api.openstack.compute import nova.auth.manager from nova.api.openstack import auth from nova import context from nova import db from nova import test from nova.tests.api.openstack import fakes class Test(test.TestCase): def setUp(self): super(Test, self).setUp() self.stubs.Set(auth.AuthMiddleware, '__init__', fakes.fake_auth_init) self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) fakes.FakeAuthManager.clear_fakes() fakes.FakeAuthDatabase.data = {} fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_networking(self.stubs) def tearDown(self): fakes.fake_data_store = {} super(Test, self).tearDown() def test_authorize_user(self): f = fakes.FakeAuthManager() user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) f.add_user(user) req = webob.Request.blank('/v2/') req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' req.headers['X-Auth-Project-Id'] = 'user1_project' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") def test_authorize_token(self): f = fakes.FakeAuthManager() user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) f.add_user(user) f.create_project('user1_project', user) req = webob.Request.blank('/v2/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], "http://foo/v2/user1_project") self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack.compute, 'APIRouter', fakes.FakeRouter) req = webob.Request.blank('/v2/user1_project') req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') def test_token_expiry(self): self.destroy_called = False def destroy_token_mock(meh, context, token): self.destroy_called = True def bad_token(meh, context, token_hash): return fakes.FakeToken( token_hash=token_hash, created_at=datetime.datetime(1990, 1, 1)) self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy', destroy_token_mock) self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get', bad_token) req = webob.Request.blank('/v2/') req.headers['X-Auth-Token'] = 'token_hash' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 
Unauthorized') self.assertEqual(self.destroy_called, True) def test_authorize_project(self): f = fakes.FakeAuthManager() user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) f.add_user(user) f.create_project('user1_project', user) f.create_project('user2_project', user) req = webob.Request.blank('/v2/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack.compute, 'APIRouter', fakes.FakeRouter) req = webob.Request.blank('/v2/user2_project') req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') def test_bad_user_bad_key(self): req = webob.Request.blank('/v2/') req.headers['X-Auth-User'] = 'unknown_user' req.headers['X-Auth-Key'] = 'unknown_user_key' req.headers['X-Auth-Project-Id'] = 'user_project' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_bad_user_good_key(self): f = fakes.FakeAuthManager() user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) f.add_user(user) req = webob.Request.blank('/v2/') req.headers['X-Auth-User'] = 'unknown_user' req.headers['X-Auth-Key'] = 'user1_key' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_no_user(self): req = webob.Request.blank('/v2/') result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_bad_token(self): req = webob.Request.blank('/v2/') req.headers['X-Auth-Token'] = 'unknown_token' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_bad_project(self): f = fakes.FakeAuthManager() user1 = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) user2 = nova.auth.manager.User('id2', 'user2', 'user2_key', None, None) f.add_user(user1) f.add_user(user2) f.create_project('user1_project', user1) f.create_project('user2_project', user2) req = webob.Request.blank('/v2/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack.compute, 'APIRouter', fakes.FakeRouter) req = webob.Request.blank('/v2/user2_project') req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_not_authorized_project(self): f = fakes.FakeAuthManager() user1 = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) f.add_user(user1) f.create_project('user1_project', user1) user2 = nova.auth.manager.User('id2', 'user2', 'user2_key', None, None) f.add_user(user2) f.create_project('user2_project', user2) req = webob.Request.blank('/v2/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack.compute, 'APIRouter', fakes.FakeRouter) req = 
webob.Request.blank('/v2/user2_project') req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') class TestFunctional(test.TestCase): def test_token_expiry(self): ctx = context.get_admin_context() tok = db.auth_token_create(ctx, dict( token_hash='test_token_hash', cdn_management_url='', server_management_url='', storage_url='', user_id='user1', )) db.auth_token_update(ctx, tok.token_hash, dict( created_at=datetime.datetime(2000, 1, 1, 12, 0, 0), )) req = webob.Request.blank('/v2/') req.headers['X-Auth-Token'] = 'test_token_hash' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_token_doesnotexist(self): req = webob.Request.blank('/v2/') req.headers['X-Auth-Token'] = 'nonexistant_token_hash' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') class TestLimiter(test.TestCase): def setUp(self): super(TestLimiter, self).setUp() self.stubs.Set(auth.AuthMiddleware, '__init__', fakes.fake_auth_init) self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) fakes.FakeAuthManager.clear_fakes() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) def tearDown(self): fakes.fake_data_store = {} super(TestLimiter, self).tearDown() def test_authorize_token(self): f = fakes.FakeAuthManager() user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None) f.add_user(user) f.create_project('test', user) req = webob.Request.blank('/v2/') req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(len(result.headers['X-Auth-Token']), 40) token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack.compute, 'APIRouter', fakes.FakeRouter) req = webob.Request.blank('/v2/test') req.method = 'POST' req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') class TestNoAuthMiddleware(test.TestCase): def setUp(self): super(TestNoAuthMiddleware, self).setUp() self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) fakes.FakeAuthManager.clear_fakes() fakes.FakeAuthDatabase.data = {} fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_networking(self.stubs) def tearDown(self): fakes.fake_data_store = {} super(TestNoAuthMiddleware, self).tearDown() def test_authorize_user(self): req = webob.Request.blank('/v2') req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' req.headers['X-Auth-Project-Id'] = 'user1_project' result = req.get_response(fakes.wsgi_app(fake_auth=False, use_no_auth=True)) self.assertEqual(result.status, '204 No Content') self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") self.assertEqual(result.headers['X-Server-Management-Url'], "http://localhost/v2/user1_project") def test_authorize_user_trailing_slash(self): #make sure it works with trailing slash on the request req = webob.Request.blank('/v2/') req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' req.headers['X-Auth-Project-Id'] = 'user1_project' result = req.get_response(fakes.wsgi_app(fake_auth=False, use_no_auth=True)) self.assertEqual(result.status, '204 No Content') self.assertEqual(result.headers['X-CDN-Management-Url'], "") 
self.assertEqual(result.headers['X-Storage-Url'], "") self.assertEqual(result.headers['X-Server-Management-Url'], "http://localhost/v2/user1_project")
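# --- Editor's sketch: the webob request/response pattern used above ---------
# A self-contained illustration (not nova code) of how webob drives a WSGI app
# the same way req.get_response(fakes.wsgi_app(...)) does in these tests. The
# echo_app below is hypothetical; nova's auth middleware sits in its place.
import webob
import webob.dec


@webob.dec.wsgify
def echo_app(req):
    # Mirror the 401 cases above: no token header means unauthorized.
    if 'X-Auth-Token' not in req.headers:
        return webob.Response(status=401)
    resp = webob.Response(status=204)
    resp.headers['X-Auth-Token'] = req.headers['X-Auth-Token']
    return resp


req = webob.Request.blank('/v2/')
req.headers['X-Auth-Token'] = 'token_hash'
result = req.get_response(echo_app)
assert result.status == '204 No Content'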
# -*- coding: utf-8 -*- __author__ = "isparks" import unittest from rwslib.builders import * from rwslib.tests.common import obj_to_doc from datetime import datetime class TestClinicalData(unittest.TestCase): """Test ClinicalData classes""" def setUp(self): self.tested = ClinicalData("STUDY1", "DEV")( SubjectData("SITE1", "SUBJECT1")( StudyEventData("VISIT_1")( FormData("TESTFORM_A")( ItemGroupData()( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ), ItemGroupData(item_group_repeat_key=2)( ItemData("Field3", "ValueC") ), ) ) ) ) def test_basic(self): """Test the basic properties""" self.assertEqual("STUDY1", self.tested.projectname) self.assertEqual("DEV", self.tested.environment) # Test default MetadataVersionOID self.assertEqual("1", self.tested.metadata_version_oid) def test_metadata_version_oid(self): """ Test we can handle an MDV as a String """ self.tested.metadata_version_oid = "2" doc = obj_to_doc(self.tested) self.assertEqual( doc.attrib["MetaDataVersionOID"], self.tested.metadata_version_oid ) def test_metadata_version_oid_as_int(self): """ Test that we can handle an MDV as an integer (which we are mandating in the IG) """ self.tested.metadata_version_oid = 56 doc = obj_to_doc(self.tested) self.assertEqual( doc.attrib["MetaDataVersionOID"], str(self.tested.metadata_version_oid) ) def test_only_accepts_subjectdata(self): """Test that only SubjectData can be inserted""" tested = ClinicalData("STUDY1", "DEV") def do(): tested << object() self.assertRaises(ValueError, do) def test_builder(self): """XML produced""" doc = obj_to_doc(self.tested) self.assertEqual(doc.tag, "ClinicalData") def test_add_to_odm(self): """We can add multiple ClinicalData to an ODM""" odm = ODM("Some test case") odm << ClinicalData("Study1", "Dev") odm << ClinicalData("Study1", "Dev") tested = obj_to_doc(odm) self.assertEqual("ODM", tested.tag) self.assertEqual(2, len(list(tested))) class TestSubjectData(unittest.TestCase): """Test SubjectData classes""" def setUp(self): self.tested = SubjectData("SITE1", "SUBJECT1")( StudyEventData("VISIT_1")( FormData("TESTFORM_A")( ItemGroupData()( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ), ItemGroupData(item_group_repeat_key=2)( ItemData("Field3", "ValueC") ), ) ) ) def test_basic(self): """Test the basic properties""" self.assertEqual("SITE1", self.tested.sitelocationoid) self.assertEqual("SUBJECT1", self.tested.subject_key) # Default transaction type self.assertEqual("Update", self.tested.transaction_type) def test_invalid_transaction_type_direct_assign(self): """Test transaction type will not allow you to set to invalid choice""" def do(): self.tested.transaction_type = "invalid" self.assertRaises(AttributeError, do) def test_children(self): """Test there is 1 child""" self.assertEqual(1, len(self.tested.study_events)) def test_builder(self): """XML produced""" doc = obj_to_doc(self.tested) # Test default transaction type self.assertEqual(doc.attrib["TransactionType"], "Update") self.assertEqual(doc.tag, "SubjectData") def test_only_add_studyeventdata_once(self): """Test that a StudyEventData object can only be added once""" sed = StudyEventData("V1") self.tested << sed def do(): self.tested << sed self.assertRaises(ValueError, do) def test_does_not_accept_all_elements(self): """Test that, for example, ItemData cannot be accepted""" def do(): self.tested << ItemData("Field1", "ValueC") self.assertRaises(ValueError, do) def test_accepts_auditrecord(self): """Test that AuditRecord can be inserted""" ar = AuditRecord( 
used_imputation_method=False, identifier="ABC1", include_file_oid=False )( UserRef("test_user"), LocationRef("test_site"), ReasonForChange("Testing"), DateTimeStamp(datetime.now()), ) self.tested << ar self.assertEqual(self.tested.audit_record, ar) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue( len(list(t)) == 3 ) # 1 StudyEventData + 1 SiteRef + 1 AuditRecord def test_add_annotations(self): """Test we can add one or more annotations""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] for i in range(0, 4): self.tested << Annotation( comment=Comment("Some Comment %s" % i), flags=flags ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue( len(list(t)) == 6 ) # 1 StudyEventData + 1 SiteRef + 4 annotations def test_add_signature(self): """Test we can add one signature""" self.tested << Signature( signature_id="Some ID", user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 3) # 1 studyeventdata + 1 SiteRef + 1 signature def test_multiple_subject_data(self): """We can add multiple SubjectData to the Clinical Data""" cd = ClinicalData("Mediflex", "Prod") cd << SubjectData("Site1", "Subject1") cd << SubjectData("Site1", "Subject2") doc = obj_to_doc(cd) self.assertEqual(2, len(doc)) class TestStudyEventData(unittest.TestCase): """Test StudyEventData classes""" def setUp(self): self.tested = StudyEventData("VISIT_1")( FormData("TESTFORM_A")( ItemGroupData()( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ), ItemGroupData(item_group_repeat_key=2)(ItemData("Field3", "ValueC")), ) ) def test_transaction_type(self): """Test transaction type inserted if set""" self.tested.transaction_type = "Update" doc = obj_to_doc(self.tested) self.assertEqual(doc.attrib["TransactionType"], self.tested.transaction_type) def test_builders_basic(self): doc = obj_to_doc(self.tested) self.assertEqual(doc.attrib["StudyEventOID"], "VISIT_1") self.assertIsNone(doc.attrib.get("StudyEventRepeatKey")) self.assertEqual(len(doc), 1) self.assertEqual(doc.tag, "StudyEventData") def test_study_event_repeat_key(self): """ If supplied we export the study event repeat key""" tested = StudyEventData("VISIT_1", study_event_repeat_key="1")( FormData("TESTFORM_A")( ItemGroupData()( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ), ItemGroupData(item_group_repeat_key=2)(ItemData("Field3", "ValueC")), ) ) t = obj_to_doc(tested) self.assertEqual("StudyEventData", t.tag) self.assertEqual("1", t.attrib["StudyEventRepeatKey"]) def test_study_event_repeat_key_as_int(self): """ If supplied we export the study event repeat key""" tested = StudyEventData("VISIT_1", study_event_repeat_key=1)( FormData("TESTFORM_A")( ItemGroupData()( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ), ItemGroupData(item_group_repeat_key=2)(ItemData("Field3", "ValueC")), ) ) t = obj_to_doc(tested) self.assertEqual("StudyEventData", t.tag) self.assertEqual("1", t.attrib["StudyEventRepeatKey"]) def test_only_add_formdata_once(self): """Test that an FormData object can only be added once""" fd = FormData("FORM1") 
self.tested << fd def do(): self.tested << fd self.assertRaises(ValueError, do) def test_add_annotations(self): """Test we can add one or more annotations""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] for i in range(0, 4): self.tested << Annotation( comment=Comment("Some Comment %s" % i), flags=flags ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 5) # one formdata + 4 annotations def test_add_signature(self): """Test we can add one signature""" self.tested << Signature( signature_id="Some ID", user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 2) # 1 formdata + 1 signature def test_invalid_transaction_type_direct_assign(self): """Test transaction type will not allow you to set to invalid choice""" def do(): self.tested.transaction_type = "invalid" self.assertRaises(AttributeError, do) def test_only_accepts_formdata(self): """Test that only FormData can be inserted""" def do(): # Bzzzt. Should be FormData self.tested << ItemData("Field1", "ValueC") self.assertRaises(ValueError, do) class TestFormData(unittest.TestCase): """Test FormData classes""" def setUp(self): self.tested = FormData("TESTFORM_A")( ItemGroupData()(ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB")), ItemGroupData()(ItemData("Field3", "ValueC")), ItemGroupData()(ItemData("Field4", "ValueD")), ) def test_children(self): """Test there are 3 children""" self.assertEqual(3, len(self.tested.itemgroups)) def test_invalid_transaction_type(self): """Test transaction type will not allow you to set to invalid choice""" def do(): FormData("MYFORM", transaction_type="invalid") self.assertRaises(AttributeError, do) def test_only_accepts_itemgroupdata(self): """Test that only ItemGroupData can be inserted""" def do(): # Bzzzt. 
Should be ItemGroupData self.tested << ItemData("Field1", "ValueC") self.assertRaises(ValueError, do) def test_only_add_itemgroup_once(self): """Test that an ItemGroupData can only be added once""" igd = ItemGroupData() self.tested << igd def do(): self.tested << igd self.assertRaises(ValueError, do) def test_builders_basic(self): doc = obj_to_doc(self.tested) self.assertEqual(doc.attrib["FormOID"], "TESTFORM_A") self.assertEqual(len(doc), 3) self.assertEqual(doc.tag, "FormData") def test_transaction_type(self): """Test transaction type inserted if set""" self.tested.transaction_type = "Update" doc = obj_to_doc(self.tested) self.assertEqual(doc.attrib["TransactionType"], self.tested.transaction_type) def test_invalid_transaction_type_direct_assign(self): """Test transaction type will not allow you to set to invalid choice""" def do(): self.tested.transaction_type = "invalid" self.assertRaises(AttributeError, do) def test_form_repeat_key(self): """Test transaction type inserted if set""" tested = FormData("TESTFORM_A", form_repeat_key=9)( ItemGroupData()(ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB")) ) doc = obj_to_doc(tested) self.assertEqual(doc.attrib["FormRepeatKey"], "9") def test_add_annotations(self): """Test we can add one or more annotations""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] for i in range(0, 4): self.tested << Annotation( comment=Comment("Some Comment %s" % i), flags=flags ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 7) # three igdata + 4 annotations def test_add_signature(self): """Test we can add one signature""" self.tested << Signature( signature_id="Some ID", user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 4) # three igdata + 1 signature class TestItemGroupData(unittest.TestCase): """Test ItemGroupData classes""" def setUp(self): self.tested = ItemGroupData()( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ) def test_children(self): """Test there are 2 children""" self.assertEqual(2, len(self.tested.items)) def test_two_same_invalid(self): """Test adding a duplicate field causes error""" def do(): self.tested << ItemData("Field1", "ValueC") self.assertRaises(ValueError, do) def test_only_accepts_itemdata(self): """Test that an ItemGroupData will only accept an ItemData element""" def do(): self.tested << {"Field1": "ValueC"} self.assertRaises(ValueError, do) def test_invalid_transaction_type(self): def do(): ItemGroupData(transaction_type="invalid") self.assertRaises(AttributeError, do) def test_builders_basic(self): doc = obj_to_doc(self.tested, "TESTFORM") self.assertEqual(doc.attrib["ItemGroupOID"], "TESTFORM") self.assertEqual(len(doc), 2) self.assertEqual(doc.tag, "ItemGroupData") def test_transaction_type(self): """Test transaction type inserted if set""" self.tested.transaction_type = "Context" doc = obj_to_doc(self.tested, "TESTFORM") self.assertEqual(doc.attrib["TransactionType"], "Context") def test_whole_item_group(self): """mdsol:Submission should be wholeitemgroup or SpecifiedItemsOnly""" doc = obj_to_doc(self.tested, 
"TESTFORM") self.assertEqual(doc.attrib["mdsol:Submission"], "SpecifiedItemsOnly") self.tested.whole_item_group = True doc = obj_to_doc(self.tested, "TESTFORM") self.assertEqual(doc.attrib["mdsol:Submission"], "WholeItemGroup") def test_add_annotations(self): """Test we can add one or more annotations""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] for i in range(0, 4): self.tested << Annotation( comment=Comment("Some Comment %s" % i), flags=flags ) t = obj_to_doc(self.tested, "TESTFORM") self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 6) # two itemdata + 4 annotations def test_add_annotations_on_create_multiple(self): """Test we can add one or more annotations at initialisation""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] annotations = [ Annotation(comment=Comment("Some Comment %s" % i), flags=flags) for i in range(0, 4) ] # add a list of annotations igd = ItemGroupData(annotations=annotations)( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ) t = obj_to_doc(igd, "TESTFORM") self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 6) # two itemdata + 4 annotations def test_add_annotations_on_create_single(self): """Test we can add one or more annotations at initialisation with one""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] annotations = [ Annotation(comment=Comment("Some Comment %s" % i), flags=flags) for i in range(0, 4) ] # add a list of annotations igd = ItemGroupData(annotations=annotations[0])( ItemData("Field1", "ValueA"), ItemData("Field2", "ValueB") ) t = obj_to_doc(igd, "TESTFORM") self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 3) # two itemdata + 4 annotations def test_add_signature(self): """Test we can add one signature""" self.tested << Signature( signature_id="Some ID", user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) t = obj_to_doc(self.tested, "TESTFORM") self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 3) # two itemdata + 1 signature class TestItemData(unittest.TestCase): """Test ItemData classes""" def setUp(self): self.tested = ItemData("FIELDA", "TEST") def test_basic(self): tested = self.tested self.assertEqual(tested.itemoid, "FIELDA") self.assertEqual(tested.value, "TEST") self.assertEqual(tested.lock, None) self.assertEqual(tested.freeze, None) self.assertEqual(tested.verify, None) def test_only_accepts_itemdata(self): """Test that an ItemData will not accept any old object""" with self.assertRaises(ValueError): self.tested << {"Field1": "ValueC"} def test_accepts_query(self): """Test that an ItemData will accept a query""" query = MdsolQuery() self.tested << query self.assertEqual(query, self.tested.queries[0]) def test_accepts_measurement_unit_ref(self): """Test that an ItemData will accept a measurement unit ref""" mur = MeasurementUnitRef("Celsius") self.tested << mur self.assertEqual(mur, 
self.tested.measurement_unit_ref) def test_isnull_not_set(self): """Isnull should not be set where we have a value not in '', None""" doc = obj_to_doc(self.tested) # Check IsNull attribute is missing def do(): doc.attrib["IsNull"] self.assertRaises(KeyError, do) def test_specify(self): """Test specify""" specify_value = "A Specify" self.tested.specify_value = specify_value doc = obj_to_doc(self.tested) self.assertEqual(doc.attrib["mdsol:SpecifyValue"], specify_value) def test_freeze_lock_verify(self): tested = ItemData("FIELDA", "TEST", lock=True, verify=True, freeze=False) self.assertEqual(tested.lock, True) self.assertEqual(tested.freeze, False) self.assertEqual(tested.verify, True) def test_builder(self): """Test building XML""" tested = ItemData("FIELDA", "TEST", lock=True, verify=True, freeze=False) tested << AuditRecord( edit_point=AuditRecord.EDIT_DATA_MANAGEMENT, used_imputation_method=False, identifier="x2011", include_file_oid=False, )( UserRef("Fred"), LocationRef("Site102"), ReasonForChange("Data Entry Error"), DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80)), ) tested << MdsolQuery() tested << MeasurementUnitRef("Celsius") doc = obj_to_doc(tested) self.assertEqual(doc.attrib["ItemOID"], "FIELDA") self.assertEqual(doc.attrib["Value"], "TEST") self.assertEqual(doc.attrib["mdsol:Verify"], "Yes") self.assertEqual(doc.attrib["mdsol:Lock"], "Yes") self.assertEqual(doc.attrib["mdsol:Freeze"], "No") self.assertEqual(doc.tag, "ItemData") self.assertEqual("AuditRecord", list(doc)[0].tag) self.assertEqual("MeasurementUnitRef", list(doc)[1].tag) self.assertEqual("mdsol:Query", list(doc)[2].tag) def test_transaction_type(self): tested = self.tested tested.transaction_type = "Update" doc = obj_to_doc(tested) self.assertEqual(doc.attrib["TransactionType"], "Update") def test_null_value(self): """Null or empty string values are treated specially with IsNull property and no value""" tested = self.tested tested.value = "" doc = obj_to_doc(tested) self.assertEqual(doc.attrib["IsNull"], "Yes") # Check Value attribute is also missing def do(): doc.attrib["Value"] self.assertRaises(KeyError, do) def test_invalid_transaction_type(self): def do(): ItemData("A", "val", transaction_type="invalid") self.assertRaises(AttributeError, do) def test_add_annotations(self): """Test we can add one or more annotations""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] for i in range(0, 4): self.tested << Annotation( comment=Comment("Some Comment %s" % i), flags=flags ) t = obj_to_doc(self.tested) self.assertEqual(self.__class__.__name__[4:], t.tag) self.assertTrue(len(list(t)) == 4) # one formdata + 4 annotations class TestUserRef(unittest.TestCase): def test_accepts_no_children(self): with self.assertRaises(ValueError): UserRef("Gertrude") << object() def test_builder(self): """Test building XML""" tested = UserRef("Fred") doc = obj_to_doc(tested) self.assertEqual(doc.attrib["UserOID"], "Fred") self.assertEqual(doc.tag, "UserRef") class TestLocationRef(unittest.TestCase): def test_accepts_no_children(self): with self.assertRaises(ValueError): LocationRef("Nowhereville") << object() def test_builder(self): """Test building XML""" tested = LocationRef("Gainesville") doc = obj_to_doc(tested) self.assertEqual(doc.attrib["LocationOID"], "Gainesville") self.assertEqual(doc.tag, "LocationRef") def test_builder_int_oid(self): """Test building XML""" tested = LocationRef(12) doc = 
obj_to_doc(tested) self.assertEqual(doc.attrib["LocationOID"], "12") self.assertEqual(doc.tag, "LocationRef") class TestReasonForChange(unittest.TestCase): def test_accepts_no_children(self): with self.assertRaises(ValueError): ReasonForChange("Because I wanted to") << object() def test_builder(self): """Test building XML""" tested = ReasonForChange("Testing 1..2..3") doc = obj_to_doc(tested) self.assertEqual("Testing 1..2..3", doc.text) self.assertEqual(doc.tag, "ReasonForChange") class TestDateTimeStamp(unittest.TestCase): def test_accepts_no_children(self): with self.assertRaises(ValueError): DateTimeStamp(datetime.now()) << object() def test_builder_with_datetime(self): dt = datetime(2015, 9, 11, 10, 15, 22, 80) tested = DateTimeStamp(dt) doc = obj_to_doc(tested) self.assertEqual(dt_to_iso8601(dt), doc.text) self.assertEqual(doc.tag, "DateTimeStamp") def test_builder_with_string(self): dt = "2009-02-04T14:10:32-05:00" tested = DateTimeStamp(dt) doc = obj_to_doc(tested) self.assertEqual(dt, doc.text) self.assertEqual(doc.tag, "DateTimeStamp") class TestAuditRecord(unittest.TestCase): def setUp(self): self.tested = AuditRecord( edit_point=AuditRecord.EDIT_DATA_MANAGEMENT, used_imputation_method=False, identifier="X2011", include_file_oid=False, ) self.tested << UserRef("Fred") self.tested << LocationRef("Site102") self.tested << ReasonForChange("Data Entry Error") self.tested << DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80)) def test_identifier_must_not_start_digit(self): with self.assertRaises(AttributeError): AuditRecord(identifier="2011") with self.assertRaises(AttributeError): AuditRecord(identifier="*Hello") # Underscore OK ar = AuditRecord(identifier="_Hello") self.assertEqual("_Hello", ar.audit_id) # Letter OK ar = AuditRecord(identifier="Hello") self.assertEqual("Hello", ar.audit_id) def test_accepts_no_invalid_children(self): with self.assertRaises(ValueError): AuditRecord() << object() def test_invalid_edit_point(self): with self.assertRaises(AttributeError): AuditRecord(edit_point="Blah") def test_builder(self): doc = obj_to_doc(self.tested) self.assertEqual(doc.tag, "AuditRecord") self.assertEqual(AuditRecord.EDIT_DATA_MANAGEMENT, doc.attrib["EditPoint"]) self.assertEqual("No", doc.attrib["UsedImputationMethod"]) self.assertEqual("No", doc.attrib["mdsol:IncludeFileOID"]) self.assertEqual("UserRef", list(doc)[0].tag) self.assertEqual("LocationRef", list(doc)[1].tag) self.assertEqual("DateTimeStamp", list(doc)[2].tag) self.assertEqual("ReasonForChange", list(doc)[3].tag) def test_no_user_ref(self): """Test with no user ref should fail on build with a ValueError""" self.tested.user_ref = None with self.assertRaises(ValueError) as err: doc = obj_to_doc(self.tested) self.assertIn("UserRef", err.exception.message) def test_no_location_ref(self): """Test with no location ref should fail on build with a ValueError""" self.tested.location_ref = None with self.assertRaises(ValueError) as err: doc = obj_to_doc(self.tested) self.assertIn("LocationRef", err.exception.message) def test_no_datetime_stamp(self): """Test with no datetimestamp should fail on build with a ValueError""" self.tested.date_time_stamp = None with self.assertRaises(ValueError) as err: doc = obj_to_doc(self.tested) self.assertIn("DateTimeStamp", err.exception.message) class TestSignatureRef(unittest.TestCase): def test_creates_expected_element(self): """We get the Signature Ref element""" t = SignatureRef("ASIGNATURE") doc = obj_to_doc(t) self.assertEqual("SignatureRef", doc.tag) self.assertEqual("ASIGNATURE", 
doc.attrib["SignatureOID"]) class TestSignature(unittest.TestCase): def test_creates_expected_element(self): """We create a Signature element""" t = Signature( signature_id="Some ID", user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) doc = obj_to_doc(t) self.assertEqual("Signature", doc.tag) self.assertEqual("Some ID", doc.attrib["ID"]) # all four elements are present self.assertTrue(len(list(doc)) == 4) def test_creates_expected_element_no_id(self): """We create a Signature element without an ID""" t = Signature( user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) doc = obj_to_doc(t) self.assertEqual("Signature", doc.tag) self.assertTrue("ID" not in doc.attrib) # all four elements are present self.assertTrue(len(list(doc)) == 4) def test_all_elements_are_required(self): """All the sub-elements are required""" all = dict( user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) t0 = Signature() with self.assertRaises(ValueError) as exc: doc = obj_to_doc(t0) self.assertEqual("User Reference not set.", str(exc.exception)) t1 = Signature(user_ref=all.get("user_ref")) with self.assertRaises(ValueError) as exc: doc = obj_to_doc(t1) self.assertEqual("Location Reference not set.", str(exc.exception)) t2 = Signature( user_ref=all.get("user_ref"), location_ref=all.get("location_ref") ) with self.assertRaises(ValueError) as exc: doc = obj_to_doc(t2) self.assertEqual("Signature Reference not set.", str(exc.exception)) t3 = Signature( user_ref=all.get("user_ref"), location_ref=all.get("location_ref"), signature_ref=all.get("signature_ref"), ) with self.assertRaises(ValueError) as exc: doc = obj_to_doc(t3) self.assertEqual("DateTime not set.", str(exc.exception)) def test_signature_builder(self): """""" tested = Signature(signature_id="Some ID") all = dict( user_ref=UserRef(oid="AUser"), location_ref=LocationRef(oid="ALocation"), signature_ref=SignatureRef(oid="ASignature"), date_time_stamp=DateTimeStamp( date_time=datetime( year=2016, month=12, day=25, hour=12, minute=0, second=0 ) ), ) for child in all.values(): tested << child doc = obj_to_doc(tested) self.assertEqual("Signature", doc.tag) self.assertEqual("Some ID", doc.attrib["ID"]) # all four elements are present self.assertTrue(len(list(doc)) == 4) def test_signature_builder_with_invalid_input(self): """""" tested = Signature(signature_id="Some ID") with self.assertRaises(ValueError) as exc: tested << ItemData(itemoid="GENDER", value="MALE") self.assertEqual( "Signature cannot accept a child element of type ItemData", str(exc.exception), ) class TestAnnotation(unittest.TestCase): """ Test Annotation classes """ def test_happy_path(self): """ Simple Annotation with a single flag and comment""" tested = Annotation(annotation_id="APPLE", seqnum=1) f = Flag( flag_value=FlagValue("Some value", codelist_oid="ANOID"), flag_type=FlagType("Some type", codelist_oid="ANOTHEROID"), ) c = Comment("Some Comment") tested << f tested << c t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertEqual("1", 
t.attrib["SeqNum"]) self.assertEqual("APPLE", t.attrib["ID"]) self.assertTrue(len(list(t)) == 2) def test_happy_path_id_optional(self): """ Simple Annotation with a single flag and comment, no ID""" tested = Annotation(seqnum=1) f = Flag( flag_value=FlagValue("Some value", codelist_oid="ANOID"), flag_type=FlagType("Some type", codelist_oid="ANOTHEROID"), ) c = Comment("Some Comment") tested << f tested << c t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertEqual("1", t.attrib["SeqNum"]) self.assertNotIn("ID", t.attrib) self.assertTrue(len(list(t)) == 2) def test_happy_path_seqnum_defaulted(self): """ Simple Annotation with a single flag and comment, SeqNum missing""" tested = Annotation() f = Flag( flag_value=FlagValue("Some value", codelist_oid="ANOID"), flag_type=FlagType("Some type", codelist_oid="ANOTHEROID"), ) c = Comment("Some Comment") tested << f tested << c t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertEqual("1", t.attrib["SeqNum"]) self.assertTrue(len(list(t)) == 2) def test_happy_path_multiple_flags(self): """ Simple Annotation with a multiple flags and comment""" tested = Annotation() c = Comment("Some Comment") # Add some flags for i in range(0, 3): tested << Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) tested << c t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertTrue(len(list(t)) == 4) def test_happy_path_multiple_flags_on_init(self): """ Simple Annotation with a multiple flags and comment created at init""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] tested = Annotation(comment=Comment("Some Comment"), flags=flags) t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertTrue(len(list(t)) == 4) def test_happy_path_flag_on_init(self): """ Simple Annotation with a single flag and comment created at init""" flags = [ Flag( flag_value=FlagValue("Some value %s" % i, codelist_oid="ANOID%s" % i), flag_type=FlagType("Some type %s" % i, codelist_oid="ANOTHEROID%s" % i), ) for i in range(0, 3) ] tested = Annotation(comment=Comment("Some Comment"), flags=flags[0]) t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertTrue(len(list(t)) == 2) def test_not_flag_on_init(self): """ Simple Annotation with not a flag raises an exception and comment created at init""" notflags = ItemData(itemoid="GENDER", value="MALE") with self.assertRaises(AttributeError) as exc: tested = Annotation(comment=Comment("Some Comment"), flags=notflags) self.assertEqual( "Flags attribute should be an iterable or Flag", str(exc.exception) ) def test_only_accept_valid_children(self): """ Annotation can only take one or more Flags and one Comment""" tested = Annotation(annotation_id="An Annotation") with self.assertRaises(ValueError) as exc: tested << ItemData(itemoid="GENDER", value="MALE") self.assertEqual( "Annotation cannot accept a child element of type ItemData", str(exc.exception), ) tested << Comment("A comment") with self.assertRaises(ValueError) as exc: tested << Comment("Another Comment") self.assertEqual( "Annotation already has a Comment element set.", str(exc.exception) ) def test_only_valid_id_accepted(self): """ Annotation ID must be a non empty string""" for nonsense in ("", " "): with self.assertRaises(AttributeError) as exc: tested = 
Annotation(annotation_id=nonsense) self.assertEqual( "Invalid ID value supplied", str(exc.exception), "Value should raise with '%s'" % nonsense, ) def test_only_valid_seqnum_accepted(self): """ Annotation ID must be a non empty string""" for nonsense in ("apple", " ", -1): with self.assertRaises(AttributeError) as exc: tested = Annotation(seqnum=nonsense) self.assertEqual( "Invalid SeqNum value supplied", str(exc.exception), "Value should raise with '%s'" % nonsense, ) def test_need_flags(self): """ Annotation needs a Flag """ tested = Annotation(comment=Comment("A comment")) with self.assertRaises(ValueError) as exc: t = obj_to_doc(tested) self.assertEqual("Flag is not set.", str(exc.exception)) def test_transaction_type(self): """ Annotation can take a transaction type """ tested = Annotation( flags=Flag( flag_value=FlagValue("Some value", codelist_oid="ANOID"), flag_type=FlagType("Some type", codelist_oid="ANOTHEROID"), ), comment=Comment("A comment"), transaction_type="Update", ) t = obj_to_doc(tested) self.assertEqual("Annotation", t.tag) self.assertEqual("Update", t.attrib["TransactionType"]) class TestAnnotations(unittest.TestCase): def test_happy_path(self): """We create a Annotations object and add annotations to it""" obj = Annotations() obj << Annotation(annotation_id="1")( Flag()(FlagValue("test 1", codelist_oid="MILESTONE")) ) obj << Annotation(annotation_id="2")( Flag()(FlagValue("test 2", codelist_oid="MILESTONE")) ) obj << Annotation(annotation_id="3")( Flag()(FlagValue("test 3", codelist_oid="MILESTONE")) ) tested = obj_to_doc(obj) self.assertEqual("Annotations", tested.tag) self.assertEqual(3, len(list(tested))) def test_sad_path(self): """We create a Annotations object and can't add a flag""" obj = Annotations() with self.assertRaises(ValueError) as exc: obj << Flag()(FlagValue("test 1", codelist_oid="MILESTONE")) self.assertEqual( "Annotations cannot accept a child element of type Flag", str(exc.exception) ) class TestFlag(unittest.TestCase): """ Test Flag classes """ def test_happy_path(self): """Create a Flag object""" tested = Flag() tested << FlagValue("Some value", codelist_oid="ANOID") tested << FlagType("Some type", codelist_oid="ANOTHEROID") t = obj_to_doc(tested) self.assertEqual("Flag", t.tag) self.assertTrue(len(list(t)) == 2) def test_no_value(self): """No FlagValue is an exception""" tested = Flag() tested << FlagType("Some type", codelist_oid="ANOTHEROID") with self.assertRaises(ValueError) as exc: t = obj_to_doc(tested) self.assertEqual("FlagValue is not set.", str(exc.exception)) def test_only_expected_types(self): """We can only add Flag-type elements""" tested = Flag() with self.assertRaises(ValueError) as exc: tested << ItemData(itemoid="GENDER", value="MALE") self.assertEqual( "Flag cannot accept a child element of type ItemData", str(exc.exception) ) def test_only_expected_types_instance_vars(self): """We can only add Flag-type elements""" with self.assertRaises(ValueError) as exc: tested = Flag(flag_type=ItemData(itemoid="GENDER", value="MALE")) self.assertEqual( "Flag cannot accept a child element of type ItemData", str(exc.exception) ) with self.assertRaises(ValueError) as exc: tested = Flag(flag_value=ItemData(itemoid="GENDER", value="MALE")) self.assertEqual( "Flag cannot accept a child element of type ItemData", str(exc.exception) ) class TestFlagType(unittest.TestCase): """ Test FlagType classes """ def test_happy_path(self): """Create a FlagType object""" tested = FlagType("A Type") tested.codelist_oid = "ANOID" t = obj_to_doc(tested) 
        self.assertEqual("FlagType", t.tag)
        self.assertEqual("ANOID", t.attrib["CodeListOID"])
        self.assertEqual("A Type", t.text)

    def test_no_oid_exception(self):
        """Creating a FlagType object without a CodeListOID is an exception"""
        tested = FlagType("A Type")
        with self.assertRaises(ValueError) as exc:
            t = obj_to_doc(tested)
        self.assertEqual("CodeListOID not set.", str(exc.exception))

    def test_invalid_oid_exception(self):
        """Assigning a nonsensical value is an error"""
        tested = FlagType("A Type")
        for nonsense in (None, "", " "):
            with self.assertRaises(AttributeError) as exc:
                tested.codelist_oid = nonsense
            self.assertEqual("Empty CodeListOID value is invalid.", str(exc.exception))

    def test_invalid_oid_exception_at_creation(self):
        """Assigning a nonsensical value is an error"""
        with self.assertRaises(AttributeError) as exc:
            tested = FlagType("A Type", codelist_oid="")
        self.assertEqual("Empty CodeListOID value is invalid.", str(exc.exception))


class TestFlagValue(unittest.TestCase):
    """Test FlagValue classes"""

    def test_happy_path(self):
        """Create a FlagValue object"""
        tested = FlagValue("A Value")
        tested.codelist_oid = "ANOID"
        t = obj_to_doc(tested)
        self.assertEqual("FlagValue", t.tag)
        self.assertEqual("ANOID", t.attrib["CodeListOID"])
        self.assertEqual("A Value", t.text)

    def test_no_oid_exception(self):
        """Creating a FlagValue object without a CodeListOID is an exception"""
        tested = FlagValue("A Value")
        with self.assertRaises(ValueError) as exc:
            t = obj_to_doc(tested)
        self.assertEqual("CodeListOID not set.", str(exc.exception))

    def test_invalid_oid_exception(self):
        """Assigning a nonsensical value is an error"""
        tested = FlagValue("A Value")
        for nonsense in (None, "", " "):
            with self.assertRaises(AttributeError) as exc:
                tested.codelist_oid = nonsense
            self.assertEqual("Empty CodeListOID value is invalid.", str(exc.exception))

    def test_invalid_oid_exception_at_creation(self):
        """Assigning a nonsensical value is an error"""
        with self.assertRaises(AttributeError) as exc:
            tested = FlagValue("A Value", codelist_oid="")
        self.assertEqual("Empty CodeListOID value is invalid.", str(exc.exception))


class TestComment(unittest.TestCase):
    """Test Comment classes"""

    def test_happy_path(self):
        """Creating a valid Comment, no problems"""
        tested = Comment()
        tested.text = "Some comment"
        tested.sponsor_or_site = "Site"
        t = obj_to_doc(tested)
        self.assertEqual("Comment", t.tag)
        self.assertEqual("Site", t.attrib["SponsorOrSite"])
        self.assertEqual("Some comment", t.text)

    def test_happy_path_no_commenter(self):
        """Creating a valid Comment without a commenter, no problems"""
        tested = Comment()
        tested.text = "Some comment"
        t = obj_to_doc(tested)
        self.assertEqual("Comment", t.tag)
        self.assertNotIn("SponsorOrSite", t.attrib)
        self.assertEqual("Some comment", t.text)

    def test_invalid_commenter(self):
        """Creating a Comment with an invalid commenter raises an exception"""
        tested = Comment()
        tested.text = "Some comment"
        with self.assertRaises(AttributeError) as exc:
            tested.sponsor_or_site = "Some guy off the street"
        self.assertEqual(
            "Comment sponsor_or_site value of Some guy off the street is not valid",
            str(exc.exception),
        )

    def test_invalid_no_comment(self):
        """Creating an invalid Comment with no text raises an exception"""
        tested = Comment()
        with self.assertRaises(ValueError) as exc:
            t = obj_to_doc(tested)
        self.assertEqual("Text is not set.", str(exc.exception))

    def test_invalid_text_comment(self):
        """Creating a Comment with invalid text raises an exception"""
        tested = Comment()
        for nonsense in (None, "", " "):
            with self.assertRaises(AttributeError) as exc:
                tested.text = nonsense
            self.assertEqual("Empty text value is invalid.", str(exc.exception))


class TestSourceID(unittest.TestCase):
    def test_create_source_id(self):
        """We can create a source ID"""
        obj = SourceID("12345")
        tested = obj_to_doc(obj)
        self.assertEqual("SourceID", tested.tag)
        self.assertEqual("12345", tested.text)

    def test_add_to_audit(self):
        """We can add a SourceID to an Audit"""
        record = AuditRecord()
        record << UserRef("glow1")
        record << LocationRef("hillview")
        record << DateTimeStamp(datetime.utcnow())
        record << SourceID("12345")
        tested = obj_to_doc(record)
        self.assertEqual("AuditRecord", tested.tag)
        self.assertEqual("SourceID", list(tested)[-1].tag)
        self.assertEqual("12345", list(tested)[-1].text)


class TestSiteRef(unittest.TestCase):
    def test_uuid_type(self):
        """We can define a SiteRef using a UUID"""
        siteref = SiteRef(oid="E20DEF2D-0CD4-4B3A-B963-AC7D592CB85B")
        siteref.add_attribute("LocationOIDType", "SiteUUID")
        tested = obj_to_doc(siteref)
        self.assertEqual("SiteRef", tested.tag)
        self.assertEqual(
            "E20DEF2D-0CD4-4B3A-B963-AC7D592CB85B", tested.get("LocationOID")
        )
        self.assertEqual("SiteUUID", tested.get("mdsol:LocationOIDType"))
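
# Usage sketch (not a test): the builder API exercised above composes ODM
# elements with << and serializes them with obj_to_doc(). The names reuse this
# module's imports; the OIDs and text values are illustrative placeholders.
def _example_build_annotation():
    annotation = Annotation(annotation_id="AN1", seqnum=1)
    annotation << Flag(
        flag_value=FlagValue("Query Raised", codelist_oid="FLAGVALUES"),
        flag_type=FlagType("Query", codelist_oid="FLAGTYPES"),
    )
    annotation << Comment("Raised by the site monitor")
    # Returns an ElementTree element: <Annotation ID="AN1" SeqNum="1"> with a
    # Flag child and a Comment child.
    return obj_to_doc(annotation)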
import pytest from orderedset._orderedset import OrderedSet from plenum.common.event_bus import InternalBus from plenum.common.messages.node_messages import PrePrepare from plenum.common.startable import Mode from plenum.common.constants import POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CURRENT_PROTOCOL_VERSION, AUDIT_LEDGER_ID, \ TXN_PAYLOAD, TXN_PAYLOAD_DATA, AUDIT_TXN_VIEW_NO, AUDIT_TXN_PP_SEQ_NO, AUDIT_TXN_DIGEST from plenum.common.timer import QueueTimer from plenum.common.util import get_utc_epoch from plenum.server.batch_handlers.node_reg_handler import NodeRegHandler from plenum.server.consensus.primary_selector import RoundRobinConstantNodesPrimariesSelector from plenum.server.database_manager import DatabaseManager from plenum.server.propagator import Requests from plenum.server.quorums import Quorums from plenum.server.replica import Replica from plenum.test.conftest import getValueFromModule from plenum.test.helper import MockTimestamp, sdk_random_request_objects, create_pre_prepare_params, \ create_prepare_from_pre_prepare from plenum.test.testing_utils import FakeSomething from plenum.test.bls.conftest import fake_state_root_hash, fake_multi_sig, fake_multi_sig_value class ReplicaFakeNode(FakeSomething): def __init__(self, viewNo, quorums, ledger_ids): node_names = ["Alpha", "Beta", "Gamma", "Delta"] node_stack = FakeSomething( name="fake stack", connecteds=set(node_names) ) self.replicas = [] self.viewNo = viewNo audit_ledger = FakeSomething(size=0, get_last_txn=lambda *args: None, getAllTxn=lambda *args, **kwargs: []) db_manager = DatabaseManager() db_manager.register_new_database(AUDIT_LEDGER_ID, audit_ledger) super().__init__( name="fake node", ledger_ids=ledger_ids, _viewNo=viewNo, quorums=quorums, nodestack=node_stack, utc_epoch=lambda *args: get_utc_epoch(), mode=Mode.participating, view_change_in_progress=False, monitor=FakeSomething(isMasterDegraded=lambda: False), requests=Requests(), onBatchCreated=lambda self, *args, **kwargs: True, applyReq=lambda self, *args, **kwargs: True, primaries=[], get_validators=lambda: [], db_manager=db_manager, write_manager=FakeSomething(database_manager=db_manager, node_reg_handler=NodeRegHandler(db_manager), apply_request=lambda req, cons_time: None), timer=QueueTimer(), poolManager=FakeSomething(node_names_ordered_by_rank=lambda: node_names), primaries_selector=RoundRobinConstantNodesPrimariesSelector(node_names) ) @property def viewNo(self): return self._viewNo @viewNo.setter def viewNo(self, viewNo): self._viewNo = viewNo for replica in self.replicas: replica._consensus_data.viewNo = viewNo @property def is_synced(self) -> bool: return Mode.is_done_syncing(self.mode) @property def isParticipating(self) -> bool: return self.mode == Mode.participating def add_replica(self, replica): self.replicas.append(replica) for replica in self.replicas: replica._consensus_data.view_no = self.viewNo @pytest.fixture(scope='function', params=[0, 10]) def viewNo(tconf, request): return request.param @pytest.fixture(scope='function') def ledger_ids(): return [POOL_LEDGER_ID] @pytest.fixture(scope='function', params=[0]) def inst_id(request): return request.param @pytest.fixture(scope="function") def mock_timestamp(): return get_utc_epoch @pytest.fixture() def fake_requests(): return sdk_random_request_objects(10, identifier="fake_did", protocol_version=CURRENT_PROTOCOL_VERSION) @pytest.fixture() def txn_roots(): return ["AAAgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ", "BBBJmfG5DYAE8ZcdTTFMiwcZaDN6CRVdSdkhBXnkYPio", 
"CCCJmfG5DYAE8ZcdTTFMiwcZaDN6CRVdSdkhBXnkYPio", "DDDJmfG5DYAE8ZcdTTFMiwcZaDN6CRVdSdkhBXnkYPio"] @pytest.fixture() def state_roots(fake_state_root_hash): return ["EuDgqga9DNr4bjH57Rdq6BRtvCN1PV9UX5Mpnm9gbMAZ", fake_state_root_hash, "D95JmfG5DYAE8ZcdTTFMiwcZaDN6CRVdSdkhBXnkYPio", None] @pytest.fixture(scope='function') def replica(tconf, viewNo, inst_id, ledger_ids, mock_timestamp, fake_requests, txn_roots, state_roots, request): node = ReplicaFakeNode(viewNo=viewNo, quorums=Quorums(getValueFromModule(request, 'nodeCount', default=4)), ledger_ids=ledger_ids) bls_bft_replica = FakeSomething( gc=lambda *args: None, update_pre_prepare=lambda params, l_id: params, validate_pre_prepare=lambda a, b: None, validate_prepare=lambda a, b: None, update_prepare=lambda a, b: a, process_prepare=lambda a, b: None, process_pre_prepare=lambda a, b: None, process_order=lambda *args: None ) replica = Replica( node, instId=inst_id, isMaster=inst_id == 0, config=tconf, bls_bft_replica=bls_bft_replica, get_current_time=mock_timestamp, get_time_for_3pc_batch=mock_timestamp ) node.add_replica(replica) ReplicaFakeNode.master_last_ordered_3PC = replica.last_ordered_3pc replica._ordering_service.last_accepted_pre_prepare_time = replica.get_time_for_3pc_batch() replica.primaryName = "Alpha:{}".format(replica.instId) replica.primaryNames[replica.viewNo] = replica.primaryName replica._ordering_service.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger] replica._ordering_service.get_state_root_hash = lambda ledger, to_str=False: state_roots[ledger] replica._ordering_service._revert = lambda ledgerId, stateRootHash, reqCount: None replica._ordering_service.post_batch_creation = lambda three_pc_batch: None replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet() replica._ordering_service._get_primaries_for_ordered = lambda pp: [replica.primaryName] replica._ordering_service._get_node_reg_for_ordered = lambda pp: ["Alpha", "Beta", "Gamma", "Delta"] def reportSuspiciousNodeEx(ex): assert False, ex replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx return replica @pytest.fixture(scope='function') def primary_replica(replica): replica.primaryName = replica.name return replica @pytest.fixture(scope='function') def replica_with_requests(replica, fake_requests): replica._ordering_service._apply_pre_prepare = lambda a: (fake_requests, [], [], False) for req in fake_requests: replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID].add(req.key) replica.requests.add(req) replica.requests.set_finalised(req) return replica @pytest.fixture(scope="function", params=['BLS_not_None', 'BLS_None']) def multi_sig(fake_multi_sig, request): if request.param == 'BLS_None': return None return fake_multi_sig @pytest.fixture(scope="function") def pre_prepare(replica, state_roots, txn_roots, multi_sig, fake_requests): params = create_pre_prepare_params(state_root=state_roots[DOMAIN_LEDGER_ID], ledger_id=DOMAIN_LEDGER_ID, txn_root=txn_roots[DOMAIN_LEDGER_ID], bls_multi_sig=multi_sig, view_no=replica.viewNo, inst_id=replica.instId, pool_state_root=state_roots[POOL_LEDGER_ID], audit_txn_root=txn_roots[AUDIT_LEDGER_ID], reqs=fake_requests) pp = PrePrepare(*params) return pp @pytest.fixture(scope="function") def prepare(pre_prepare): return create_prepare_from_pre_prepare(pre_prepare)
""" make_base_installers.py -- Build the installers for one or more of the supported operating systems, depending on options given. Runs on Linux systems only. Usage: python make_base_installer.py m|l|w|i|a|t path/to/trunk/ pubkey privkey output/dir/ [version of seattle] [--wg path/to/Windows/GUI/builder/makensis.exe] Flags: m,l,w,i,a,d,t represent the OS for which the base installer is being created. m = Macintosh, l = Linux, w = Windows, i = Windows Mobile, d = Android, a = all systems. t = include tests in installer. NOTE: The Windows GUI installer will ONLY be built if the 'w' or 'a' options are passed ALONG WITH the '--wg' option. Example of usage on command line: python ./Seattle/trunk/dist/make_base_installers.py a ./Seattle/trunk/ user.publickey user.privatekey ./Installers/ 1.0a """ import os import sys import shutil import subprocess import tempfile import zipfile import tarfile import clean_folder # The name of the base directory in each installer. BASE_INSTALL_DIR = "seattle" BASE_PROGRAM_FILES_DIR = "seattle/seattle_repy" # The base name of each installer = for instance, "seattle_win.zip" INSTALLER_NAME = "seattle" # The path to the directory, relative the trunk, of the OS-specific files. WINDOWS_GUI_PATH = "/dist/win_gui" WINDOWS_PATH = "/dist/win/scripts" WINMOB_PATH = "/dist/winmob/scripts" LINUX_PATH = "/dist/linux/scripts" MAC_PATH = "/dist/mac/scripts" # The path to the directory, relative the trunk, of the OS-specific script # wrappers. WINDOWS_SCRIPT_WRAPPERS_PATH = "/dist/script_wrappers/win" LINUX_SCRIPT_WRAPPERS_PATH = "/dist/script_wrappers/linux" MAC_SCRIPT_WRAPPERS_PATH = "/dist/script_wrappers/mac" # The path to the Windows GUI builder. WINDOWS_GUI_BUILDER_PATH = "" def get_inst_name(dist, version): """ <Purpose> Given the OS and the version, returns what the name of the installer will be. <Arguments> dist: The OS that the installer is intended for, should be Windows, Macintosh, Linux, Winmob, or Android. version: A string to be appended between the dist and the extension - for instance, if version is "0.1d", then the Linux installer name will be "seattle_linux0.1d.tgz". <Exceptions> None. <Side Effects> None. <Returns> A string of the installer name for the specified OS and version. """ if version: base_name = INSTALLER_NAME + "_" + version + "_" + dist else: base_name = INSTALLER_NAME + "_" + dist if "win" in dist or "Win" in dist or "WIN" in dist: if "gui" in dist or "GUI" in dist: base_name += ".exe" else: base_name += ".zip" elif "android" in dist: base_name += ".zip" else: base_name += ".tgz" return base_name def check_flags(flags): """ <Purpose> Checks that each character in 'flags' is a valid flag and that there is at least one valid flag (i.e., m,w,l,i,d,a). <Arguments> flags: String containing the flags passed in by the user. <Exceptions> None. <Side Effects> None. <Returns> If there is an invalid flag, returns a tuple containing False and the offending flag(s). If there is not at least one valid flag, this function returns a tuple containing False and the empty strings. Otherwise, if there are no problems, a tuple with True and the empty string is returned. """ valid_flags = "mwlidat" required_flags = "mwliad" got_required_flag = False no_invalid_flags = True badflag = "" # Check flags for invalid flags and required flags. for char in flags: if char not in valid_flags: no_invalid_flags = False if char not in badflag: badflag += char elif char in required_flags: got_required_flag = True # Return results. 
if no_invalid_flags and got_required_flag: return (True, badflag) else: return (False, badflag) def prepare_gen_files(trunk_location, temp_install_dir, include_tests, pubkey, privkey, finalfiles): """ <Purpose> Prepare the general non-installer-specific files (needed for all installers) and deposit them into the temporary folder designated to hold the files that will be present in the base installer(s), including the metainfo file. <Arguments> trunk_location: The path to the trunk of the repository, used to find all the requisite files that appear in the installer. pubkey: The path to a public key that will be used to generate the metainfo file. privkey: The path to a private key that will be used to generate the metainfo file. temp_install_dir: The temporary directory where the general files to be included in the installer will be placed. include_tests: Boolean variable specifying whether or not to include tests in installer. finalfiles: Boolean variable specifying whether or not to prepare the final files after the metafile has been written <Exceptions> IOError on bad file paths. <Side Effects> All general non-installer-specific files placed into the specified temporary installation directory. <Returns> List of all the files in the temporary installation directory, which will be added to the installer tarball. """ # Run preparetest to generate and place all the general installation files # in the temporary installation directory. # To run /trunk/preparetest.py, we must be in that directory (probably a bug # in preparetest.py?) original_dir = os.getcwd() preparetest_dir = trunk_location + os.sep + "dist" os.chdir(preparetest_dir) if include_tests: p = subprocess.Popen([sys.executable, preparetest_dir + os.sep + "preparetest.py", "-t", temp_install_dir]) p.wait() else: p = subprocess.Popen([sys.executable, preparetest_dir + os.sep + "preparetest.py", temp_install_dir]) p.wait() os.chdir(original_dir) # Copy the benchmarking scripts to the installer directory shutil.copy2(trunk_location + "/resource/benchmark_resources.py", temp_install_dir) shutil.copy2(trunk_location + "/resource/Mac_BSD_resources.py", temp_install_dir) shutil.copy2(trunk_location + "/resource/create_installer_state.py", temp_install_dir) shutil.copy2(trunk_location + "/resource/measuredisk.py", temp_install_dir) shutil.copy2(trunk_location + "/resource/vessel.restrictions", temp_install_dir) shutil.copy2(trunk_location + "/resource/Linux_resources.py", temp_install_dir) shutil.copy2(trunk_location + "/resource/measure_random.py", temp_install_dir) shutil.copy2(trunk_location + "/resource/Win_WinCE_resources.py", temp_install_dir) # Copy the universal installer and uninstaller to the program directory. shutil.copy2(trunk_location + "/dist/seattleinstaller.py", temp_install_dir) shutil.copy2(trunk_location + "/dist/seattleuninstaller.py", temp_install_dir) # Copy the script that stops all running seattle processes. shutil.copy2(trunk_location + "/dist/stop_all_seattle_processes.py", temp_install_dir) # Copy the script that will update old crontab entries on Linux and Darwin # systems to the new 2009 seattle crontab entry. This must remain in the # installer indefinitely (or at least for a while) in the event that a user # installed seattle with the previous, old crontab entry, then lost permission # to modify his crontab. In the event that he regains permission to modify # his crontab, the previously installed crontab entry must be updated. 
shutil.copy2(trunk_location + "/dist/update_crontab_entry.py", temp_install_dir) # Clean the folder of unnecessary files before generating metafile. clean_folder.clean_folder(trunk_location + "/dist/initial_files.fi", temp_install_dir) # To run writemetainfo.py, we must be in that directory (probably a bug in # writemetainfo.py?) os.chdir(temp_install_dir) # Generate the metainfo file. p = subprocess.Popen([sys.executable, temp_install_dir + os.sep + "writemetainfo.py", privkey, pubkey, "-n"]) p.wait() os.chdir(original_dir) # If specified, copy remaining files that should not be included in the # metafile into the temporary installation directory. if finalfiles: # Copy the static files to the program directory. shutil.copy2(trunk_location + "/dist/nodeman.cfg", temp_install_dir) shutil.copy2(trunk_location + "/dist/resources.offcut", temp_install_dir) # Run clean_folder a final time to ensure the final directory contains all # the necessary files now that the last files have been added. clean_folder.clean_folder(trunk_location + "/dist/final_files.fi", temp_install_dir) return os.listdir(temp_install_dir) def package_win_gui(trunk_location, temp_tarball_dir, zip_inst_name, gui_inst_name): """ <Purpose> Packages the installation files for Windows into a GUI executable file and adds the specific installation scripts for this OS. This function extracts the contents of the already-created Windows zipfile installer because the zipfile installer contains special Windows files that are not located anywhere else in the trunk. <Arguments> trunk_location: The location of the repository trunk. temp_tarball_dir: The path to the directory in which the installer executable will be stored. zip_inst_name: The name of the Windows zipfile installer. gui_inst_name: The name that the Windows GUI executable file will have. <Exceptions> IOError on bad file paths. <Side Effects> Puts the final executable in the temporary tarball directory. <Returns> None. """ # Create a subdirectory where the GUI installer will be created, and copy all # necessary files there. win_gui_location = tempfile.mkdtemp() shutil.copy(trunk_location + os.sep + WINDOWS_GUI_PATH + os.sep + "seattle_gui_creator.nsi", win_gui_location) # Extract the zipfile to the win_gui_location to get all the contents that # will be compressed into the Windows gui installer. installer_zipfile = zipfile.ZipFile(temp_tarball_dir + os.sep + zip_inst_name, 'r', zipfile.ZIP_DEFLATED) installer_zipfile.extractall(win_gui_location) shutil.copy(trunk_location + os.sep + "dist" + os.sep + "extract_custom_info.py",win_gui_location + os.sep + "seattle" + os.sep + "seattle_repy") # Change directories to win_gui_location because the Windows gui creator # will not work when full file paths are passed in as arguments for some # reason. original_dir = os.getcwd() os.chdir(win_gui_location) # Create the Win GUI executable with the Windows GUI builder (makensis.exe) # via subprocess. gui_creator = subprocess.Popen([WINDOWS_GUI_BUILDER_PATH, "seattle_gui_creator.nsi"], stdout=subprocess.PIPE) # The communicate() function must be called to prevent the subprocess call # above from deadlocking. gui_creator.communicate() gui_creator.wait() # The Windows GUI builder script has a built-in name that it gives to the # installer (seattle_win_gui.exe), so rename this file to gui_inst_name. os.rename("seattle_win_gui.exe", gui_inst_name) # Change back to the original directory. os.chdir(original_dir) # Put the new GUI installer into the temp_tarball_dir with the other # installers. 
  shutil.copy(win_gui_location + os.sep + gui_inst_name, temp_tarball_dir)

  # Remove the temporary GUI installer directory.
  shutil.rmtree(win_gui_location)


def package_win_or_winmob(trunk_location, temp_install_dir, temp_tarball_dir,
                          inst_name, gen_files):
  """
  <Purpose>
    Packages the installation files for Windows or Windows Mobile into a
    zipfile and adds the specific installation scripts for this OS.

  <Arguments>
    trunk_location:
      The location of the repository trunk.
    temp_install_dir:
      The path to the temporary installation directory.
    temp_tarball_dir:
      The path to the directory in which the installer zipfile(s) is stored.
    inst_name:
      The name that the final installer should have.
    gen_files:
      A list of the general non-installer-specific files located in the
      temporary installer directory.

  <Exceptions>
    IOError on bad file paths.

  <Side Effects>
    Puts the final zipfile in the temporary tarball directory.

  <Returns>
    None.
  """
  # Open the Windows zipfile for writing, or create a zipfile for Windows
  # Mobile.
  if "winmob" not in inst_name:
    shutil.copy2(trunk_location + "/dist/win/partial_win.zip",
                 temp_tarball_dir + os.sep + inst_name)
    installer_zipfile = zipfile.ZipFile(temp_tarball_dir + os.sep + inst_name,
                                        "a", zipfile.ZIP_DEFLATED)
  else:
    installer_zipfile = zipfile.ZipFile(temp_tarball_dir + os.sep + inst_name,
                                        "w", zipfile.ZIP_DEFLATED)

  # Put all general program files into the zipfile.
  for fname in gen_files:
    if os.path.isdir(temp_install_dir + os.sep + fname):
      write_files_in_dir_to_zipfile(temp_install_dir + os.sep + fname,
                                    BASE_PROGRAM_FILES_DIR + os.sep + fname + os.sep,
                                    installer_zipfile)
    else:
      installer_zipfile.write(temp_install_dir + os.sep + fname,
                              BASE_PROGRAM_FILES_DIR + os.sep + fname)

  # Put all files specific to this installer into the zipfile.
  # First, copy all scripts that belong in the BASE_PROGRAM_FILES_DIR.
  if "winmob" not in inst_name:
    specific_installer_dir = trunk_location + os.sep + WINDOWS_PATH
  else:
    specific_installer_dir = trunk_location + os.sep + WINMOB_PATH
  specific_files = os.listdir(specific_installer_dir)

  # Add OS-specific files to the zipfile.
  for fname in specific_files:
    if not fname.startswith(".") and fname != "manifest.txt":
      # Add the README and LICENSE files to the highest-level directory
      # (BASE_INSTALL_DIR).
      if "LICENSE" in fname or "README" in fname:
        installer_zipfile.write(specific_installer_dir + os.sep + fname,
                                BASE_INSTALL_DIR + os.sep + fname)
      else:
        installer_zipfile.write(specific_installer_dir + os.sep + fname,
                                BASE_PROGRAM_FILES_DIR + os.sep + fname)

  # Second, copy all script wrappers (which call those in the
  # BASE_PROGRAM_FILES_DIR) to the BASE_INSTALL_DIR.  Windows Mobile has no
  # script wrappers, so close the zipfile and return in that case.
  if "winmob" in inst_name:
    installer_zipfile.close()
    return

  script_wrappers_dir = trunk_location + os.sep + WINDOWS_SCRIPT_WRAPPERS_PATH
  script_wrappers = os.listdir(script_wrappers_dir)

  # Add script wrappers to the zipfile.
  for fname in script_wrappers:
    if not fname.startswith("."):
      installer_zipfile.write(script_wrappers_dir + os.sep + fname,
                              BASE_INSTALL_DIR + os.sep + fname)

  installer_zipfile.close()


def write_files_in_dir_to_zipfile(sourcepath, arcpath, zipfile):
  """
  <Purpose>
    Recursively inserts the files in the given source directory into the
    specified zipfile.

  <Arguments>
    sourcepath:
      The source path of the files to add.
    arcpath:
      The zip file's internal destination path to write to.
    zipfile:
      The zip file to write to.
  <Side Effects>
    Copies the files that are in sourcepath to arcpath in the zipfile.

  <Exceptions>
    None

  <Return>
    None
  """
  files = os.listdir(sourcepath)
  for fname in files:
    sourcefilepath = sourcepath + os.sep + fname
    targetfilepath = arcpath + os.sep + fname
    if os.path.isfile(sourcefilepath):
      zipfile.write(sourcefilepath, targetfilepath)
    else:
      write_files_in_dir_to_zipfile(sourcefilepath, targetfilepath, zipfile)


def package_linux_or_mac(trunk_location, temp_install_dir, temp_tarball_dir,
                         inst_name, gen_files):
  """
  <Purpose>
    Packages the installation files specific to Linux or Macintosh into a
    tarball and adds the specific installation scripts for this OS.

  <Arguments>
    trunk_location:
      The location of the repository trunk.
    temp_install_dir:
      The path to the temporary installation directory.
    temp_tarball_dir:
      The path to the directory in which the installer tarball(s) is stored.
    inst_name:
      The name that the final installer should have.
    gen_files:
      A list of the general non-installer-specific files located in the
      temporary installer directory.

  <Exceptions>
    IOError on bad file paths.

  <Side Effects>
    Puts the final tarball in the temporary tarball directory.

  <Returns>
    None.
  """
  installer_tarfile = tarfile.open(temp_tarball_dir + os.sep + inst_name,
                                   "w:gz")

  # Put all general installer files into the tar file.
  for fname in gen_files:
    if fname not in ['pyreadline']:
      installer_tarfile.add(temp_install_dir + os.sep + fname,
                            BASE_PROGRAM_FILES_DIR + os.sep + fname, True)

  # Put all Linux- and Mac-specific files into the tarball.
  # First, copy all scripts that belong in BASE_PROGRAM_FILES_DIR.
  if "linux" in inst_name:
    specific_installer_dir = trunk_location + os.sep + LINUX_PATH
  else:
    specific_installer_dir = trunk_location + os.sep + MAC_PATH
  specific_files = os.listdir(specific_installer_dir)

  # Add the OS-specific files to the tarfile.
  for fname in specific_files:
    if not fname.startswith(".") and fname != "manifest.txt":
      if "README" in fname or "LICENSE" in fname:
        installer_tarfile.add(specific_installer_dir + os.sep + fname,
                              BASE_INSTALL_DIR + os.sep + fname, False)
      else:
        installer_tarfile.add(specific_installer_dir + os.sep + fname,
                              BASE_PROGRAM_FILES_DIR + os.sep + fname, False)

  # Second, copy all script wrappers (which call those in the
  # BASE_PROGRAM_FILES_DIR) to the BASE_INSTALL_DIR.
  if "linux" in inst_name:
    script_wrappers_dir = trunk_location + os.sep + LINUX_SCRIPT_WRAPPERS_PATH
  else:
    script_wrappers_dir = trunk_location + os.sep + MAC_SCRIPT_WRAPPERS_PATH
  script_wrappers = os.listdir(script_wrappers_dir)

  # Add script wrappers to the tarball.
  for fname in script_wrappers:
    if not fname.startswith("."):
      installer_tarfile.add(script_wrappers_dir + os.sep + fname,
                            BASE_INSTALL_DIR + os.sep + fname, False)

  installer_tarfile.close()


def package_android(trunk_location, temp_install_dir, temp_tarball_dir,
                    inst_name, gen_files):
  """
  <Purpose>
    Packages the installation files specific to Android into a zipfile and
    adds the specific installation scripts for this OS.

    THIS IS CUT AND PASTED FROM ABOVE WITH ONLY MINOR CHANGES.
    NEEDS REFACTOR!

  <Arguments>
    trunk_location:
      The location of the repository trunk.
    temp_install_dir:
      The path to the temporary installation directory.
    temp_tarball_dir:
      The path to the directory in which the installer zipfile(s) is stored.
    inst_name:
      The name that the final installer should have.
    gen_files:
      A list of the general non-installer-specific files located in the
      temporary installer directory.
<Exceptions> IOError on bad file paths. <Side Effects> Puts the final zipfile in the temporary tarball directory. <Returns> None. """ installer_zipfile = zipfile.ZipFile(temp_tarball_dir + os.sep + inst_name, "w", zipfile.ZIP_DEFLATED) # Put all general program files into zipfile. for fname in gen_files: if os.path.isdir(temp_install_dir + os.sep + fname): if fname not in ['pyreadline']: write_files_in_dir_to_zipfile(temp_install_dir + os.sep + fname, BASE_PROGRAM_FILES_DIR + os.sep + fname + os.sep, installer_zipfile) else: installer_zipfile.write(temp_install_dir + os.sep + fname, BASE_PROGRAM_FILES_DIR + os.sep + fname) # Put generic files in the zipfile. (Same as Linux) specific_installer_dir = trunk_location + os.sep + LINUX_PATH specific_files = os.listdir(specific_installer_dir) # Add the OS-specific files to the zipfile. for fname in specific_files: if not fname.startswith(".") and fname != "manifest.txt": if "README" in fname or "LICENSE" in fname: installer_zipfile.write(specific_installer_dir + os.sep + fname, BASE_INSTALL_DIR + os.sep + fname) else: installer_zipfile.write(specific_installer_dir + os.sep + fname, BASE_PROGRAM_FILES_DIR + os.sep + fname) # Second, copy all script wrappers (which call those in the # BASE_PROGRAM_FILES_DIR) to the BASE_INSTALL_DIR. script_wrappers_dir = trunk_location + os.sep + LINUX_SCRIPT_WRAPPERS_PATH script_wrappers = os.listdir(script_wrappers_dir) # Add script wrappers to the zipfile. for fname in script_wrappers: if not fname.startswith("."): installer_zipfile.write(script_wrappers_dir + os.sep + fname, BASE_INSTALL_DIR + os.sep + fname) installer_zipfile.close() def test_arguments(arguments): """ Check that the arguments supplied on the command line make sense. """ # Test argument flags if len(arguments) < 6: print "Too few arguments." return False elif len(arguments) > 9: print "Too many arguments." return False flags = arguments[1] passed, offenses = check_flags(flags) if not passed: if offenses == "": print "Requires at least one of these flags: m,l,w,i,d,a" else: print "Invalid flag(s): " + offenses return False # Validate the existence of argument's paths and files trunkdir, pubkey, privkey, outdir = arguments[2:6] if not os.path.exists(trunkdir): raise IOError("Trunk not found at " + trunkdir) if not os.path.exists(outdir): raise IOError("Output directory does not exist at " + outdir) if not os.path.exists(pubkey): raise IOError("Public key not found at " + pubkey) if not os.path.exists(privkey): raise IOError("Private key not found at " + privkey) # All arguments are valid. return True def usage(): print """ USAGE: python make_base_installer.py m|l|w|i|d|a|t path/to/trunk/ pubkey privkey output/dir/ [version of seattle] [--wg path/to/Windows/GUI/builder/makensis.exe] FLAGS: m,l,w,i,d,a,t represent the OS for which the base installer is being created. m = Macintosh, l = Linux, w = Windows, i = Windows Mobile, d = Android, a = all systems; t = include tests in installer. NOTE: The Windows GUI installer will ONLY be built if the 'w' or 'a' options are passed ALONG WITH the '--wg' option." """ def main(): # Prepare to create installer(s). # Test arguments and find full pathnames. arguments_valid = test_arguments(sys.argv) if not arguments_valid: usage() return # Reaching this point means all arguments are valid, so set the variables and # get full pathnames when necessary. 
# NOTE: IF MORE OPTIONS ARE EVER ADDED TO THIS PROGRAM, CONSIDER USING THE # PYTHON MODULE getopt TO PARSE THE OPTIONS SINCE THE BELOW LOGIC WILL # START TO GET REALLY COMPLICATED. installer_type = sys.argv[1] trunk_location = os.path.realpath(sys.argv[2]) output_dir = os.path.realpath(sys.argv[5]) pubkey = os.path.realpath(sys.argv[3]) privkey = os.path.realpath(sys.argv[4]) version = "" # Figure out if the optional version number or the path to the Windows GUI # builder was passed in. if len(sys.argv) > 6: if len(sys.argv) == 7: # Only one extra option was passed, so it must be the version number. version = sys.argv[6] if version == "--wg": print "Windows GUI builder path not specified" usage() return else: global WINDOWS_GUI_BUILDER_PATH if sys.argv[6] == "--wg": # The path to the Windows GUI builder was passed in. if len(sys.argv) == 7: # The path was not given with the "--wg" option. usage() return elif len(sys.argv) > 8: # The version number was also given. version = sys.argv[8] WINDOWS_GUI_BUILDER_PATH = sys.argv[7] else: # The version must have been given before the path to the Windows GUI # builder if the path was given at all. version = sys.argv[6] if sys.argv[7] != "--wg": # An extraneous option must have been given. usage() return else: WINDOWS_GUI_BUILDER_PATH = sys.argv[8] if WINDOWS_GUI_BUILDER_PATH: # Confirm that the path exists. if not os.path.lexists(WINDOWS_GUI_BUILDER_PATH): print "Invalid path to the Windows GUI builder: ", print WINDOWS_GUI_BUILDER_PATH print "Failed to build installers." return else: # Get full file path. WINDOWS_GUI_BUILDER_PATH = os.path.realpath(WINDOWS_GUI_BUILDER_PATH) # Begin creating base installers. print "Creating installer(s) - this may take a few moments...." # Create temporary directory for the files to go into the installer. temp_install_dir = tempfile.mkdtemp() # Create temporary directory for creating the tarball(s) / zipfile(s). temp_tarball_dir = tempfile.mkdtemp() # Prepare all general non-installer-specific files to go into installer. print "Preparing all general non-OS-specific files...." include_tests = False if "t" in installer_type: include_tests = True gen_files = prepare_gen_files(trunk_location, temp_install_dir, include_tests, pubkey, privkey, True) print "Complete." # Build individual installer(s). print "Customizing installer(s) for the specified operating system(s)...." created_installers = [] # Package the Windows installer. if "w" in installer_type or "a" in installer_type: inst_name = get_inst_name("win", version) package_win_or_winmob(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files) created_installers.append(inst_name) # See if we need to create the Windows GUI installer if WINDOWS_GUI_BUILDER_PATH: inst_name_gui = get_inst_name("win_gui", version) package_win_gui(trunk_location, temp_tarball_dir, inst_name, inst_name_gui) created_installers.append(inst_name_gui) # Package the Linux installer. if "l" in installer_type or "a" in installer_type: inst_name = get_inst_name("linux", version) package_linux_or_mac(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files) created_installers.append(inst_name) # Package the Mac installer. if "m" in installer_type or "a" in installer_type: inst_name = get_inst_name("mac", version) package_linux_or_mac(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files) created_installers.append(inst_name) # Package the Windows Mobile installer. 
if "i" in installer_type or "a" in installer_type: inst_name = get_inst_name("winmob", version) package_win_or_winmob(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files) created_installers.append(inst_name) # Package the Android installer. if "d" in installer_type or "a" in installer_type: inst_name = get_inst_name("android", version) package_android(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files) created_installers.append(inst_name) # Move the installer tarball(s) / zipfile(s) to the specified output # directory. for tarball in os.listdir(temp_tarball_dir): shutil.copy2(temp_tarball_dir + os.sep + tarball, output_dir) # Remove the temporary directories shutil.rmtree(temp_install_dir) shutil.rmtree(temp_tarball_dir) print print "Finished." print print "The following base installers have been placed in " + output_dir + ":" for installer in created_installers: print installer if __name__ == "__main__": main()
import warnings

import numpy
import six

from chainer import configuration
from chainer import functions
from chainer import initializer
from chainer import link
from chainer.links.caffe.protobuf3 import caffe_pb2 as caffe_pb
from chainer.links.connection import convolution_2d
from chainer.links.connection import deconvolution_2d
from chainer.links.connection import linear
from chainer.links.connection import scale
from chainer.links.normalization import batch_normalization
from chainer.utils import argument

try:
    # This method is undocumented, but is required to read large size of
    # model files when a user uses cpp-implementation.
    from google.protobuf.pyext import _message
    _message.SetAllowOversizeProtos(True)
except ImportError:
    pass

_type_to_method = {}
_oldname_to_method = {}


def _layer(typ, oldname):
    def decorator(meth):
        global _type_to_method
        _type_to_method[typ] = meth
        if oldname is not None:
            typevalue = getattr(caffe_pb.V1LayerParameter, oldname)
            _oldname_to_method[typevalue] = meth
        return meth
    return decorator


class _Blob(initializer.Initializer):

    chunk_size = 1024 * 1024

    def __init__(self, blob):
        super(_Blob, self).__init__()
        self.data = blob.data

    def __call__(self, array):
        array = array.ravel()
        size = len(array)
        indices = list(range(0, size, self.chunk_size))

        # Rather than accessing Protobuf's RepeatedScalar fields directly,
        # creating an intermediate list by indexing is more efficient due to
        # the implementation of the Python extension of Protobuf.
        # To avoid allocating excessively large lists, we limit the length
        # of lists by `chunk_size`.
        for start, end in zip(indices, indices[1:] + [size]):
            array[start:end] = self.data[start:end]


class _ConvolutionBlob(_Blob):

    def __init__(self, blob, group):
        super(_ConvolutionBlob, self).__init__(blob)
        self.group = group

    def __call__(self, array):
        n_out, n_in = array.shape[:2]

        part_out = n_out // self.group
        part_in = n_in // self.group

        array[...] = 0

        part_size = len(self.data) // self.group
        for i in six.moves.range(self.group):
            out_slice = slice(i * part_out, (i + 1) * part_out)
            in_slice = slice(i * part_in, (i + 1) * part_in)
            w = array[out_slice, in_slice]

            data = numpy.array(self.data[i * part_size:(i + 1) * part_size])
            w[:] = data.reshape(w.shape)


class CaffeFunction(link.Chain):

    """Caffe emulator based on the model file of Caffe.

    Given a protocol buffers file of a Caffe model, this class loads and
    emulates it on :class:`~chainer.Variable` objects. It supports the
    official reference models provided by BVLC.

    .. note::

       CaffeFunction ignores the following layers:

       - Layers that CaffeFunction does not support (including data layers)
       - Layers that have no top blobs
       - Layers whose bottom blobs are incomplete (i.e., some or all of them
         are neither given nor computed)

    .. warning::

       It does not provide full compatibility with Caffe. Some layers and
       configurations are not implemented in Chainer yet, though the
       reference models provided by the BVLC team are supported except for
       data layers.

    .. admonition:: Example

       Suppose we want to extract the (unnormalized) log class probability
       of given images using BVLC reference CaffeNet. The model can be
       downloaded from:

       http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel

       We want to compute the ``fc8`` blob from the ``data`` blob. It is
       simply written as follows::

          # Load the model
          func = CaffeFunction('path/to/bvlc_reference_caffenet.caffemodel')

          # Minibatch of size 10
          x_data = numpy.ndarray((10, 3, 227, 227), dtype=numpy.float32)
          ...
          # (Fill the minibatch here)

          # Forward the pre-trained net
          x = Variable(x_data)
          y, = func(inputs={'data': x}, outputs=['fc8'])

       The result ``y`` contains the Variable corresponding to the ``fc8``
       blob. The computational graph is memorized as a usual forward
       computation in Chainer, so we can run backprop through this
       pre-trained net.

    Args:
        model_path (str): Path to the binary-proto model file of Caffe.

    Attributes:
        forwards (dict): A mapping from layer names to corresponding
            functions.

    """

    def __init__(self, model_path):
        super(CaffeFunction, self).__init__()

        net = caffe_pb.NetParameter()
        with open(model_path, 'rb') as model_file:
            net.MergeFromString(model_file.read())

        self.forwards = {}
        self.split_map = {}
        self.layers = []

        if net.layer:
            for layer in net.layer:
                meth = _type_to_method.get(layer.type)
                if meth:
                    meth(self, layer)
                else:
                    warnings.warn(
                        'Skip the layer "%s", since CaffeFunction does not '
                        'support %s layer' % (layer.name, layer.type))
        else:  # v1 format
            for layer in net.layers:
                meth = _oldname_to_method.get(layer.type)
                if meth:
                    meth(self, layer)
                else:
                    warnings.warn(
                        'Skip the layer "%s", since CaffeFunction does not '
                        'support it' % layer.name)

    def forward(self, inputs, outputs, disable=(), **kwargs):
        """forward(self, inputs, outputs, disable=())

        Executes a sub-network of the network.

        This function acts as an interpreter of the network definition for
        Caffe. On execution, it interprets each layer one by one, and if the
        bottom blobs are already computed, then emulates the layer and stores
        output blobs as :class:`~chainer.Variable` objects.

        Args:
            inputs (dict): A dictionary whose key-value pairs indicate
                initial correspondences between blob names and
                :class:`~chainer.Variable` objects.
            outputs (Iterable): A list of blob names whose corresponding
                :class:`~chainer.Variable` objects are returned.
            disable (Iterable): A list of layer names that will be ignored
                during the forward computation.

        Returns:
            tuple: A tuple of output :class:`~chainer.Variable` objects
            corresponding to elements of the `outputs` argument.

        """
        if kwargs:
            argument.check_unexpected_kwargs(
                kwargs, train='train argument is not supported anymore. '
                'Use chainer.using_config')
            argument.assert_kwargs_empty(kwargs)

        variables = dict(inputs)
        disable = set(disable)
        for func_name, bottom, top in self.layers:
            if (func_name in disable or
                    func_name not in self.forwards or
                    any(blob not in variables for blob in bottom)):
                continue

            func = self.forwards[func_name]
            input_vars = tuple(variables[blob] for blob in bottom)
            output_vars = func(*input_vars)
            if not isinstance(output_vars, (tuple, list)):
                output_vars = output_vars,
            for var, name in zip(output_vars, top):
                variables[name] = var

        self.variables = variables
        return tuple(variables[blob] for blob in outputs)

    def _add_layer(self, layer):
        bottom = []
        for blob_name in layer.bottom:
            bottom.append(self.split_map.get(blob_name, blob_name))
        self.layers.append((layer.name, bottom, list(layer.top)))

    @_layer('Concat', 'CONCAT')
    def _setup_concat(self, layer):
        param = layer.concat_param
        axis = param.axis
        if axis == 1 and param.concat_dim != 1:
            axis = param.concat_dim

        self.forwards[layer.name] = _ListArgumentFunction(
            functions.concat, axis=axis)
        self._add_layer(layer)

    @_layer('Convolution', 'CONVOLUTION')
    def _setup_convolution(self, layer):
        blobs = layer.blobs
        param = layer.convolution_param
        ksize = _get_ksize(param)
        stride = _get_stride(param)
        pad = _get_pad(param)
        num = _get_num(blobs[0])
        channels = _get_channels(blobs[0])
        bias_term = param.bias_term

        n_in = channels * param.group
        n_out = num

        func = convolution_2d.Convolution2D(
            n_in, n_out, ksize, stride, pad, nobias=not bias_term,
            initialW=_ConvolutionBlob(blobs[0], param.group),
            initial_bias=_Blob(blobs[1]) if bias_term else None)

        with self.init_scope():
            setattr(self, layer.name, func)
        self.forwards[layer.name] = _CallChildLink(self, layer.name)
        self._add_layer(layer)

    @_layer('Deconvolution', 'DECONVOLUTION')
    def _setup_deconvolution(self, layer):
        blobs = layer.blobs
        param = layer.convolution_param
        ksize = _get_ksize(param)
        stride = _get_stride(param)
        pad = _get_pad(param)
        num = _get_num(blobs[0])
        channels = _get_channels(blobs[0])
        bias_term = param.bias_term

        n_in = num
        n_out = channels * param.group

        func = deconvolution_2d.Deconvolution2D(
            n_in, n_out, ksize, stride, pad, nobias=not bias_term,
            initialW=_ConvolutionBlob(blobs[0], param.group),
            initial_bias=_Blob(blobs[1]) if bias_term else None)

        with self.init_scope():
            setattr(self, layer.name, func)
        self.forwards[layer.name] = _CallChildLink(self, layer.name)
        self._add_layer(layer)

    @_layer('Data', 'DATA')
    def _setup_data(self, layer):
        # We silently skip the data layer.
pass @_layer('Dropout', 'DROPOUT') def _setup_dropout(self, layer): param = layer.dropout_param self.forwards[layer.name] = _SingleArgumentFunction( functions.dropout, ratio=param.dropout_ratio) self._add_layer(layer) @_layer('InnerProduct', 'INNER_PRODUCT') def _setup_inner_product(self, layer): param = layer.inner_product_param bias_term = param.bias_term if param.axis != 1: raise RuntimeError( 'Non-default axis in InnerProduct is not supported') blobs = layer.blobs width, height = _get_width(blobs[0]), _get_height(blobs[0]) func = linear.Linear( width, height, nobias=not bias_term, initialW=_Blob(blobs[0]), initial_bias=_Blob(blobs[1]) if bias_term else None) with self.init_scope(): setattr(self, layer.name, func) self.forwards[layer.name] = _CallChildLink(self, layer.name) self._add_layer(layer) @_layer('LRN', 'LRN') def _setup_lrn(self, layer): param = layer.lrn_param if param.norm_region != param.ACROSS_CHANNELS: raise RuntimeError('Within-channel LRN is not supported') fwd = _SingleArgumentFunction( functions.local_response_normalization, n=param.local_size, k=param.k, alpha=param.alpha / param.local_size, beta=param.beta) self.forwards[layer.name] = fwd self._add_layer(layer) @_layer('Pooling', 'POOLING') def _setup_pooling(self, layer): param = layer.pooling_param ksize = _get_ksize(param) stride = _get_stride(param) pad = _get_pad(param) if param.pool == param.MAX: func = functions.max_pooling_2d elif param.pool == param.AVE: func = functions.average_pooling_2d else: raise RuntimeError('Stochastic pooling is not supported') if param.global_pooling and not ksize: # if global_pooling is set but no kernel size, the kernel size # is computed dynamically to cover the whole input feature map def _func(x, stride, pad): return func(x, x.shape[2:], stride=stride, pad=pad) fw = _SingleArgumentFunction(_func, stride=stride, pad=pad) else: fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad) self.forwards[layer.name] = fw self._add_layer(layer) @_layer('ReLU', 'RELU') def _setup_relu(self, layer): slope = layer.relu_param.negative_slope if slope != 0: fw = _SingleArgumentFunction(functions.leaky_relu, slope=slope) else: fw = functions.relu self.forwards[layer.name] = fw self._add_layer(layer) @_layer('Reshape', None) def _setup_reshape(self, layer): shape = layer.reshape_param.shape.dim fw = _SingleArgumentFunction(functions.reshape, shape=shape) self.forwards[layer.name] = fw self._add_layer(layer) @_layer('BatchNorm', None) def _setup_batchnorm(self, layer): # Get layer parameters. blobs = layer.blobs param = layer.batch_norm_param use_global_stats = param.use_global_stats decay = param.moving_average_fraction eps = param.eps size = int(blobs[0].shape.dim[0]) # Get channel dim from mean blob. # Make BatchNormalization link. func = batch_normalization.BatchNormalization( size, decay=decay, eps=eps, use_gamma=False, use_beta=False) _Blob(blobs[0])(func.avg_mean) _Blob(blobs[1])(func.avg_var) # Scale the means and variances if a scaling factor is appended to the # blobs to correctly mimic to the behavior of Caffe. See # https://github.com/BVLC/caffe/issues/4885 if len(blobs) >= 3: scaling_factor = blobs[2].data func.avg_mean /= scaling_factor[0] func.avg_var /= scaling_factor[0] with self.init_scope(): setattr(self, layer.name, func) # Add layer. 
if use_global_stats: func_class = _SingleArgumentFunctionTestMode else: func_class = _SingleArgumentFunction fwd = func_class(_CallChildLink(self, layer.name), finetune=False) self.forwards[layer.name] = fwd self._add_layer(layer) @_layer('Eltwise', 'ELTWISE') def _setup_eltwise(self, layer): # stable_prod_grad parameter is not supported now. operation = layer.eltwise_param.operation coeffs = layer.eltwise_param.coeff or None self.forwards[layer.name] = _EltwiseFunction(operation, coeffs) self._add_layer(layer) @_layer('Scale', None) def _setup_scale(self, layer): # Following parameters are not supported now: # - negative axis # - num_axes # - filler # - bias_filler # Get layer parameters. bottom = layer.bottom blobs = layer.blobs axis = layer.scale_param.axis bias_term = layer.scale_param.bias_term # Case of only one bottom where W is learnt parameter. if len(bottom) == 1: W_shape = blobs[0].shape.dim func = scale.Scale(axis, W_shape, bias_term) _Blob(blobs[0])(func.W.data) if bias_term: _Blob(blobs[1])(func.bias.b.data) # Case of two bottoms where W is given as a bottom. else: shape = blobs[0].shape.dim if bias_term else None func = scale.Scale( axis, bias_term=bias_term, bias_shape=shape) if bias_term: _Blob(blobs[0])(func.bias.b.data) # Add layer. with self.init_scope(): setattr(self, layer.name, func) self.forwards[layer.name] = _CallChildLink(self, layer.name) self._add_layer(layer) @_layer('Slice', 'SLICE') def _setup_slice(self, layer): if layer.slice_param.HasField('axis'): axis = layer.slice_param.axis elif layer.slice_param.HasField('slice_dim'): axis = layer.slice_param.slice_dim else: axis = 1 if layer.slice_param.slice_point: indices_or_sections = list(layer.slice_param.slice_point) else: indices_or_sections = len(list(layer.top)) self.forwards[layer.name] = _SingleArgumentFunction( functions.split_axis, indices_or_sections=indices_or_sections, axis=axis ) self._add_layer(layer) @_layer('Softmax', 'SOFTMAX') def _setup_softmax(self, layer): if layer.softmax_param.axis != 1: raise RuntimeError( 'Softmax along non-channel axis is not supported') if layer.softmax_param.engine == 0: # DEFAULT fw = functions.softmax elif layer.softmax_param.engine == 1: # CAFFE fw = _SingleArgumentFunctionWithCudnn(False, functions.softmax) elif layer.softmax_param.engine == 2: # CUDNN fw = _SingleArgumentFunctionWithCudnn(True, functions.softmax) self.forwards[layer.name] = fw self._add_layer(layer) @_layer('Sigmoid', 'SIGMOID') def _setup_sigmoid(self, layer): if layer.sigmoid_param.engine == 0: # DEFAULT fw = functions.sigmoid elif layer.sigmoid_param.engine == 1: # CAFFE fw = _SingleArgumentFunctionWithCudnn(False, functions.sigmoid) elif layer.sigmoid_param.engine == 2: # CUDNN fw = _SingleArgumentFunctionWithCudnn(True, functions.sigmoid) self.forwards[layer.name] = fw self._add_layer(layer) @_layer('SoftmaxWithLoss', 'SOFTMAX_LOSS') def _setup_softmax_with_loss(self, layer): if layer.softmax_param.axis != 1: raise RuntimeError( 'Softmax along non-channel axis is not supported') self.forwards[layer.name] = functions.softmax_cross_entropy self._add_layer(layer) @_layer('Split', 'SPLIT') def _setup_split(self, layer): for top in layer.top: self.split_map[top] = layer.bottom[0] # Internal functions def _get_ksize(param): if param.kernel_h > 0: return param.kernel_h, param.kernel_w elif type(param.kernel_size) == int: return param.kernel_size elif len(param.kernel_size) == 1: return param.kernel_size[0] else: return param.kernel_size def _get_stride(param): if param.stride_h > 0: return 
param.stride_h, param.stride_w
    elif type(param.stride) == int:
        return param.stride
    elif len(param.stride) == 0:
        return 1
    elif len(param.stride) == 1:
        return param.stride[0]
    else:
        return param.stride


def _get_pad(param):
    if param.pad_h > 0 or param.pad_w > 0:
        return param.pad_h, param.pad_w
    elif type(param.pad) == int:
        return param.pad
    elif len(param.pad) == 0:
        return 0
    elif len(param.pad) == 1:
        return param.pad[0]
    else:
        return param.pad


def _get_num(blob):
    if blob.num > 0:
        return blob.num
    else:
        return blob.shape.dim[0]


def _get_channels(blob):
    if blob.channels > 0:
        return blob.channels
    else:
        return blob.shape.dim[1]


def _get_height(blob):
    if blob.height > 0:
        return blob.height
    elif len(blob.shape.dim) == 2:
        return blob.shape.dim[0]
    elif len(blob.shape.dim) == 4:
        return blob.shape.dim[2]
    else:
        raise RuntimeError(
            '{}-dimensional array is not supported'.format(
                len(blob.shape.dim)))


def _get_width(blob):
    if blob.width > 0:
        return blob.width
    elif len(blob.shape.dim) == 2:
        return blob.shape.dim[1]
    elif len(blob.shape.dim) == 4:
        return blob.shape.dim[3]
    else:
        raise RuntimeError(
            '{}-dimensional array is not supported'.format(
                len(blob.shape.dim)))


# Internal class
# __call__ must return Variable or tuple
class _SingleArgumentFunction(object):

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __call__(self, x):
        return self.func(x, *self.args, **self.kwargs)


class _SingleArgumentFunctionTestMode(_SingleArgumentFunction):

    def __call__(self, x):
        with configuration.using_config('train', False):
            return super(_SingleArgumentFunctionTestMode, self).__call__(x)


class _ListArgumentFunction(object):

    def __init__(self, func, **kwargs):
        self.func = func
        self.kwargs = kwargs

    def __call__(self, *xs):
        return self.func(xs, **self.kwargs)


class _SingleArgumentFunctionWithCudnn(_SingleArgumentFunction):

    def __init__(self, use_cudnn, func, *args, **kwargs):
        super(_SingleArgumentFunctionWithCudnn, self).__init__(
            func, *args, **kwargs)
        self.use_cudnn = use_cudnn

    def __call__(self, x):
        with configuration.using_config('use_cudnn', self.use_cudnn):
            return super(_SingleArgumentFunctionWithCudnn, self).__call__(x)


class _CallChildLink(object):

    def __init__(self, caffe_func, name):
        self.name = name
        self.caffe_func = caffe_func

    def __call__(self, *xs, **kwargs):
        return self.caffe_func[self.name](*xs, **kwargs)


class _EltwiseFunction(object):

    def __init__(self, operation, coeffs=None):
        if coeffs is not None:
            assert len(coeffs) > 0
        self.operation = operation
        self.coeffs = coeffs

    def __call__(self, *xs):
        operation = self.operation

        if operation == 0:  # PROD
            return six.moves.reduce(lambda x, y: x * y, xs),

        elif operation == 1:  # SUM
            coeffs = self.coeffs
            if coeffs is not None:
                assert len(xs) == len(coeffs)
                xs = [x * coeff for x, coeff in zip(xs, coeffs)]
            return six.moves.reduce(lambda x, y: x + y, xs),

        elif operation == 2:  # MAX
            return six.moves.reduce(lambda x, y: functions.maximum(x, y), xs),

        else:
            raise ValueError('Invalid EltwiseParameter.EltwiseOp value.')
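
# Illustration of the parameter helpers above: Caffe encodes kernel/stride/pad
# either as explicit *_h/*_w fields or as a (possibly repeated) scalar, and
# _get_ksize/_get_stride/_get_pad normalize both spellings. A quick sketch
# with a stand-in parameter object (not a real protobuf message):
def _demo_param_helpers():
    class FakeConvParam(object):
        kernel_h = 0
        kernel_w = 0
        kernel_size = [3]  # repeated scalar field with one entry
        stride_h = 2
        stride_w = 1       # explicit h/w pair
        pad_h = 0
        pad_w = 0
        pad = []           # empty repeated field falls back to the default

    assert _get_ksize(FakeConvParam) == 3
    assert _get_stride(FakeConvParam) == (2, 1)
    assert _get_pad(FakeConvParam) == 0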
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2013, Nachi Ueno, NTT I3, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron.openstack.common import uuidutils from neutron.plugins.common import constants from neutron.services.vpn.device_drivers import ipsec as ipsec_driver from neutron.tests import base _uuid = uuidutils.generate_uuid FAKE_HOST = 'fake_host' FAKE_ROUTER_ID = _uuid() FAKE_VPN_SERVICE = { 'id': _uuid(), 'router_id': FAKE_ROUTER_ID, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'subnet': {'cidr': '10.0.0.0/24'}, 'ipsec_site_connections': [ {'peer_cidrs': ['20.0.0.0/24', '30.0.0.0/24']}, {'peer_cidrs': ['40.0.0.0/24', '50.0.0.0/24']}] } class TestIPsecDeviceDriver(base.BaseTestCase): def setUp(self, driver=ipsec_driver.OpenSwanDriver): super(TestIPsecDeviceDriver, self).setUp() for klass in [ 'os.makedirs', 'os.path.isdir', 'neutron.agent.linux.utils.replace_file', 'neutron.openstack.common.rpc.create_connection', 'neutron.services.vpn.device_drivers.ipsec.' 'OpenSwanProcess._gen_config_content', 'shutil.rmtree', ]: mock.patch(klass).start() self.execute = mock.patch( 'neutron.agent.linux.utils.execute').start() self.agent = mock.Mock() self.driver = driver( self.agent, FAKE_HOST) self.driver.agent_rpc = mock.Mock() def test_vpnservice_updated(self): with mock.patch.object(self.driver, 'sync') as sync: context = mock.Mock() self.driver.vpnservice_updated(context) sync.assert_called_once_with(context, []) def test_create_router(self): process_id = _uuid() process = mock.Mock() process.vpnservice = FAKE_VPN_SERVICE self.driver.processes = { process_id: process} self.driver.create_router(process_id) process.enable.assert_called_once_with() def test_destroy_router(self): process_id = _uuid() process = mock.Mock() process.vpnservice = FAKE_VPN_SERVICE self.driver.processes = { process_id: process} self.driver.destroy_router(process_id) process.disable.assert_called_once_with() self.assertNotIn(process_id, self.driver.processes) def test_sync_added(self): self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ FAKE_VPN_SERVICE] context = mock.Mock() process = mock.Mock() process.vpnservice = FAKE_VPN_SERVICE process.connection_status = {} process.status = constants.ACTIVE process.updated_pending_status = True self.driver.process_status_cache = {} self.driver.processes = { FAKE_ROUTER_ID: process} self.driver.sync(context, []) self.agent.assert_has_calls([ mock.call.add_nat_rule( FAKE_ROUTER_ID, 'POSTROUTING', '-s 10.0.0.0/24 -d 20.0.0.0/24 -m policy ' '--dir out --pol ipsec -j ACCEPT ', top=True), mock.call.add_nat_rule( FAKE_ROUTER_ID, 'POSTROUTING', '-s 10.0.0.0/24 -d 30.0.0.0/24 -m policy ' '--dir out --pol ipsec -j ACCEPT ', top=True), mock.call.add_nat_rule( FAKE_ROUTER_ID, 'POSTROUTING', '-s 10.0.0.0/24 -d 40.0.0.0/24 -m policy ' '--dir out --pol ipsec -j ACCEPT ', top=True), mock.call.add_nat_rule( FAKE_ROUTER_ID, 'POSTROUTING', '-s 10.0.0.0/24 -d 50.0.0.0/24 -m policy ' '--dir out --pol ipsec -j 
ACCEPT ', top=True), mock.call.iptables_apply(FAKE_ROUTER_ID) ]) process.update.assert_called_once_with() self.driver.agent_rpc.update_status.assert_called_once_with( context, [{'status': 'ACTIVE', 'ipsec_site_connections': {}, 'updated_pending_status': True, 'id': FAKE_VPN_SERVICE['id']}]) def fake_ensure_process(self, process_id, vpnservice=None): process = self.driver.processes.get(process_id) if not process: process = mock.Mock() process.vpnservice = FAKE_VPN_SERVICE process.connection_status = {} process.status = constants.ACTIVE process.updated_pending_status = True self.driver.processes[process_id] = process elif vpnservice: process.vpnservice = vpnservice process.update_vpnservice(vpnservice) return process def test_sync_update_vpnservice(self): with mock.patch.object(self.driver, 'ensure_process') as ensure_process: ensure_process.side_effect = self.fake_ensure_process new_vpn_service = FAKE_VPN_SERVICE updated_vpn_service = copy.deepcopy(new_vpn_service) updated_vpn_service['ipsec_site_connections'].append( {'peer_cidrs': ['60.0.0.0/24', '70.0.0.0/24']}) context = mock.Mock() self.driver.process_status_cache = {} self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ new_vpn_service] self.driver.sync(context, []) process = self.driver.processes[FAKE_ROUTER_ID] self.assertEqual(process.vpnservice, new_vpn_service) self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ updated_vpn_service] self.driver.sync(context, []) process = self.driver.processes[FAKE_ROUTER_ID] process.update_vpnservice.assert_called_once_with( updated_vpn_service) self.assertEqual(process.vpnservice, updated_vpn_service) def test_sync_removed(self): self.driver.agent_rpc.get_vpn_services_on_host.return_value = [] context = mock.Mock() process_id = _uuid() process = mock.Mock() process.vpnservice = FAKE_VPN_SERVICE self.driver.processes = { process_id: process} self.driver.sync(context, []) process.disable.assert_called_once_with() self.assertNotIn(process_id, self.driver.processes) def test_sync_removed_router(self): self.driver.agent_rpc.get_vpn_services_on_host.return_value = [] context = mock.Mock() process_id = _uuid() self.driver.sync(context, [{'id': process_id}]) self.assertNotIn(process_id, self.driver.processes) def test_status_updated_on_connection_admin_down(self): self.driver.process_status_cache = { '1': { 'status': constants.ACTIVE, 'id': 123, 'updated_pending_status': False, 'ipsec_site_connections': { '10': { 'status': constants.ACTIVE, 'updated_pending_status': False, }, '20': { 'status': constants.ACTIVE, 'updated_pending_status': False, } } } } # Simulate that there is no longer status for connection '20' # e.g. 
connection admin down new_status = { 'ipsec_site_connections': { '10': { 'status': constants.ACTIVE, 'updated_pending_status': False } } } self.driver.update_downed_connections('1', new_status) existing_conn = new_status['ipsec_site_connections'].get('10') self.assertIsNotNone(existing_conn) self.assertEqual(constants.ACTIVE, existing_conn['status']) missing_conn = new_status['ipsec_site_connections'].get('20') self.assertIsNotNone(missing_conn) self.assertEqual(constants.DOWN, missing_conn['status']) def test_status_updated_on_service_admin_down(self): self.driver.process_status_cache = { '1': { 'status': constants.ACTIVE, 'id': 123, 'updated_pending_status': False, 'ipsec_site_connections': { '10': { 'status': constants.ACTIVE, 'updated_pending_status': False, }, '20': { 'status': constants.ACTIVE, 'updated_pending_status': False, } } } } # Simulate that there are no connections now new_status = { 'ipsec_site_connections': {} } self.driver.update_downed_connections('1', new_status) missing_conn = new_status['ipsec_site_connections'].get('10') self.assertIsNotNone(missing_conn) self.assertEqual(constants.DOWN, missing_conn['status']) missing_conn = new_status['ipsec_site_connections'].get('20') self.assertIsNotNone(missing_conn) self.assertEqual(constants.DOWN, missing_conn['status'])
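
# --- Illustrative sketch (not part of the test suite) ---
# The two tests above both exercise update_downed_connections(): any
# connection present in the cached status but absent from the freshly
# reported status is copied back marked DOWN, so the server learns that an
# admin-down connection went away. Simplified standalone logic (names are
# hypothetical, not the driver's actual implementation):
#
#     def mark_downed(cached, reported):
#         for conn_id in cached['ipsec_site_connections']:
#             if conn_id not in reported['ipsec_site_connections']:
#                 reported['ipsec_site_connections'][conn_id] = {
#                     'status': constants.DOWN,
#                     'updated_pending_status': True,
#                 }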
import datetime import pytz import json from django.contrib.sessions.models import Session from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.conf.urls import url from django.core.exceptions import MultipleObjectsReturned from django.core.management import call_command from dateutil import tz from tastypie import fields from tastypie.authentication import BasicAuthentication from tastypie.authorization import DjangoAuthorization from tastypie.paginator import Paginator from tastypie.resources import ALL from tastypie.resources import ALL_WITH_RELATIONS from tastypie.resources import ModelResource from tastypie.utils import trailing_slash from tastypie.http import HttpUnauthorized, HttpForbidden from api.defaults import DEFAULT_BLACKLIST from api.models import BlackListItem, check_bumps, notify_message from api.models import ChatMessage from api.models import EyeHistory from api.models import EyeHistoryMessage from api.models import MuteList from api.models import WhiteListItem from api.models import merge_histories from api.models import Highlight from api.models import Ratings from api.models import Page from api.models import Domain from api.resource_helpers import get_BlackListItem from api.resource_helpers import get_WhiteListItem from api.resource_helpers import get_port from api.resource_helpers import urlencodeSerializer from api.utils import humanize_time from accounts.models import UserProfile from tags.models import Tag, CommonTag from common.templatetags.filters import url_domain from common.templatetags.gravatar import gravatar_for_user from eyebrowse.log import logger class MyBasicAuthentication(BasicAuthentication): def __init__(self, *args, **kwargs): super(MyBasicAuthentication, self).__init__(*args, **kwargs) def is_authenticated(self, request, **kwargs): if 'sessionid' in request.COOKIES: s = Session.objects.filter(pk=request.COOKIES['sessionid']) if s.exists(): s = s[0] if '_auth_user_id' in s.get_decoded(): u = User.objects.get(id=s.get_decoded()['_auth_user_id']) request.user = u return True return False class PublicGetAuthentication(MyBasicAuthentication): def is_authenticated(self, request, **kwargs): if request.method == 'GET': return True else: return super(PublicGetAuthentication, self).is_authenticated(request, **kwargs) class BaseMeta: ''' Abstract class to get basic authentication and authorization. ''' authentication = MyBasicAuthentication() authorization = DjangoAuthorization() serializer = urlencodeSerializer() class BaseResource(ModelResource): ''' Subclass this to get generic ModelResource add-ins that TastyPie doesn't supply. 
''' def apply_authorization_limits(self, request, object_list): return object_list.filter(user=request.user) class UserResource(ModelResource): def override_urls(self): return [ url(r'^(?P<resource_name>%s)/(?P<username>[\w\d_.-]+)/$' % self._meta.resource_name, self.wrap_view( 'dispatch_detail'), name='api_dispatch_detail'), ] class Meta(BaseMeta): queryset = User.objects.all() resource_name = 'user' detail_allowed_methods = ['get'] list_allowed_methods = [] fields = ['username', 'first_name', 'last_name', 'last_login'] filtering = { 'username': ALL, } class LoginResource(ModelResource): class Meta: queryset = User.objects.all() fields = ['first_name', 'last_name', 'email'] allowed_methods = ['get', 'post'] resource_name = 'auth' def override_urls(self): return [ url(r"^(?P<resource_name>%s)/login%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('login'), name="api_login"), url(r'^(?P<resource_name>%s)/logout%s$' % (self._meta.resource_name, trailing_slash()), self.wrap_view('logout'), name='api_logout'), ] def login(self, request, **kwargs): self.method_check(request, allowed=['post']) username = request.POST.get('username', '') password = request.POST.get('password', '') user = authenticate(username=username, password=password) if user: if user.is_active: login(request, user) return self.create_response(request, { 'success': True }) else: return self.create_response(request, { 'success': False, 'reason': 'disabled', }, HttpForbidden ) else: return self.create_response(request, { 'success': False, 'reason': 'incorrect', }, HttpUnauthorized ) def logout(self, request, **kwargs): self.method_check(request, allowed=['get']) if request.user and request.user.is_authenticated(): logout(request) return self.create_response(request, { 'success': True }) else: return self.create_response(request, { 'success': False }, HttpUnauthorized) class UserProfileResource(ModelResource): user = fields.ForeignKey(UserResource, 'user') class Meta(BaseMeta): queryset = UserProfile.objects.all() resource_name = 'user_profile' detail_allowed_methods = ['get'] list_allowed_methods = [] fields = ['pic_url'] filtering = { 'user': ALL_WITH_RELATIONS } class MuteListResource(BaseResource): user = fields.ForeignKey(UserResource, 'user') def obj_create(self, bundle, request=None, **kwargs): domain = bundle.data['domain'] try: MuteList.objects.get(user=request.user, domain=domain) except MuteList.DoesNotExist: return super(MuteListResource, self).obj_create(bundle, request, user=request.user, **kwargs) return bundle class Meta(BaseMeta): list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get', 'post', 'put', 'delete'] filtering = { 'user': ALL_WITH_RELATIONS, 'domain': ALL, } queryset = MuteList.objects.select_related().all() resource_name = 'mutelist' class FilterSetItemResource(BaseResource): ''' Abstract base class ''' user = fields.ForeignKey(UserResource, 'user') class Meta(BaseMeta): detail_allowed_methods = ['get', 'post', 'put', 'delete'] filtering = { 'user': ALL_WITH_RELATIONS, 'date_created': ALL, 'url': ALL, 'port': ALL } resource_name = 'filterset' class WhiteListItemResource(FilterSetItemResource): def obj_create(self, bundle, request=None, **kwargs): url = bundle.data['url'] port = get_port(bundle.data) bundle.data['port'] = port # check to see if this exists blacklist_item = get_BlackListItem(url, port) if blacklist_item: blacklist_item.delete() # do not create if it is a default blacklist url if url in DEFAULT_BLACKLIST: return bundle try: 
WhiteListItem.objects.get(user=request.user, url=url, port=port) except WhiteListItem.DoesNotExist: try: return super(WhiteListItemResource, self).obj_create( bundle, request, user=request.user, **kwargs) except MultipleObjectsReturned as e: logger.info(e) return bundle return bundle class Meta(FilterSetItemResource.Meta): queryset = WhiteListItem.objects.select_related().all() resource_name = 'whitelist' class BlackListItemResource(FilterSetItemResource): def obj_create(self, bundle, request=None, **kwargs): url = bundle.data['url'] port = get_port(bundle) bundle.data['port'] = port # check to see if this exists whitelist_item = get_WhiteListItem(url, port) if whitelist_item: whitelist_item.delete() try: BlackListItem.objects.get(user=request.user, url=url, port=port) except BlackListItem.DoesNotExist: try: return super(BlackListItemResource, self ).obj_create( bundle, request, user=request.user, **kwargs) except MultipleObjectsReturned as e: logger.info(e) return bundle return bundle class Meta(FilterSetItemResource.Meta): queryset = BlackListItem.objects.select_related().all() resource_name = 'blacklist' class PageResource(ModelResource): class Meta(BaseMeta): queryset = Page.objects.all() resource_name = 'page-data' list_allowed_methods = ['get'] detail_allowed_methods = ['get'] class RatingsResource(ModelResource): user = fields.ForeignKey(UserResource, 'user') page = fields.ForeignKey(PageResource, 'page', full=True) class Meta(BaseMeta): queryset = Ratings.objects.all() resource_name = 'ratings-data' list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get','put','post'] filtering = { 'url': ALL, 'domain':ALL } def get_page(self,bundle): url = bundle.data["url"] domain = bundle.data["domain"] domain,_ = Domain.objects.get_or_create(url=domain) page,_ = Page.objects.get_or_create(url=url,domain=domain) return page def obj_update(self, bundle, request=None, **kwargs): user = request.user return super(RatingsResource,self).obj_update(bundle,request,user=user, page=self.get_page(bundle),from_time_distribution=False, **kwargs) def obj_create(self, bundle, request=None, **kwargs): user = request.user return super(RatingsResource,self).obj_create(bundle,request,user=user, page=self.get_page(bundle),**kwargs) def obj_get(self, bundle, request=None, **kwargs): user = request.user return super(RatingsResource,self).obj_get(bundle,request,user=user, page=self.get_page(bundle),**kwargs) def dehydrate(self, bundle): bundle.data["url"] = bundle.data["page"].data["url"] return bundle.data class EyeHistoryMessageResource(ModelResource): def apply_authorization_limits(self, request, object_list): return object_list.filter(eyehistory__user=request.user) class Meta(BaseMeta): queryset = EyeHistoryMessage.objects.all() resource_name = 'history-message' list_allowed_methods = ['get'] detail_allowed_methods = ['get'] class EyeHistoryResource(ModelResource): user = fields.ForeignKey(UserResource, 'user') message = fields.ToManyField( EyeHistoryMessageResource, 'eyehistorymessage_set', null=True, blank=True, full=True) class Meta(BaseMeta): queryset = EyeHistory.objects.select_related( ).all().order_by('-start_time') resource_name = 'history-data' list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get', 'post', 'put', 'delete'] filtering = { 'user': ALL_WITH_RELATIONS, 'url': ALL, 'title': ALL, 'start_time': ALL, 'end_time': ALL, 'total_time': ALL, } paginator_class = Paginator authentication = PublicGetAuthentication() def dehydrate(self, bundle): bundle.data['username'] = 
bundle.obj.user.username bundle.data['pic_url'] = gravatar_for_user( User.objects.get(username=bundle.obj.user.username)) return bundle.data def obj_create(self, bundle, request=None, **kwargs): url = bundle.data['url'] domain = url_domain(url) bundle.data['domain'] = domain title = bundle.data.get('title') start_time = bundle.data.get('start_time') start_event = bundle.data.get('start_event') end_time = bundle.data.get('end_time') end_event = bundle.data.get('end_event') favicon_url = bundle.data.get('favIconUrl') bundle.data['favicon_url'] = favicon_url src = bundle.data.get('src') tags = bundle.data.get('tags') if tags: tags = json.loads(tags); if end_time and start_time: end_time = datetime.datetime.strptime( end_time, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=pytz.utc) start_time = datetime.datetime.strptime( start_time, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=pytz.utc) else: end_time = datetime.datetime.now().replace(tzinfo=pytz.utc) start_time = datetime.datetime.now().replace(tzinfo=pytz.utc) message = bundle.data.get('message') highlight = bundle.data.get('highlight') parent_comment = bundle.data.get('parent_comment') if message and message.strip() == '': message = None if message: bundle.data.pop('message', None) if highlight: bundle.data.pop('highlight', None) if parent_comment: bundle.data.pop('parent_comment', None) try: exists = EyeHistory.objects.filter(user=request.user, url=url, title=title, src=src, favicon_url=favicon_url, start_time__gt=start_time - datetime.timedelta(minutes=1), start_event=start_event) if exists.count() > 0: eye_his = exists[0] eye_his.end_time = end_time eye_his.end_event = end_event elapsed_time = end_time - start_time eye_his.total_time = int(round( (elapsed_time.microseconds / 1.0E3) + (elapsed_time.seconds * 1000) + (elapsed_time.days * 8.64E7))) eye_his.humanize_time = humanize_time(elapsed_time) eye_his.save() if message: eye_message, _ = EyeHistoryMessage.objects.get_or_create( eyehistory=eye_his, message=message) notify_message(message=eye_message) else: # save_raw_eyehistory(request.user, url, title, start_event, end_event, start_time, end_time, src, domain, favicon_url) dup_histories = EyeHistory.objects.filter( user=request.user, url=url, title=title, end_time__gt=start_time - datetime.timedelta(minutes=5)) if dup_histories.count() > 0: obj = merge_histories(dup_histories, end_time, end_event) if message: eye_message, _ = EyeHistoryMessage.objects.get_or_create( eyehistory=obj, message=message) notify_message(message=eye_message) else: bundle_res = super(EyeHistoryResource, self).obj_create( bundle, request, user=request.user, **kwargs) check_bumps(request.user, start_time, end_time, url) if message: eye_message = None if parent_comment: h = Highlight.objects.get(id=highlight) eye_message, _ = EyeHistoryMessage.objects.get_or_create( eyehistory=bundle_res.obj, message=message, highlight=h, parent_comment=parent_comment) elif highlight: h = Highlight.objects.get(id=highlight) eye_message, _ = EyeHistoryMessage.objects.get_or_create( eyehistory=bundle_res.obj, message=message, highlight=h) else: eye_message, _ = EyeHistoryMessage.objects.get_or_create( eyehistory=bundle_res.obj, message=message) if tags: for tag in tags: if len(Tag.objects.filter(comment=eye_message, common_tag__name=tag)) == 0: try: common_tag = CommonTag.objects.get(name=tag) vt = Tag( common_tag=common_tag, user=request.user, comment=eye_message, ) vt.save() except CommonTag.DoesNotExist: pass notify_message(message=eye_message) return bundle_res except 
MultipleObjectsReturned as e: logger.info(e) # multiple items created, delete duplicates call_command('remove_duplicate_history') return bundle class ChatMessageResource(ModelResource): author = fields.ForeignKey(UserResource, 'author') def dehydrate(self, bundle): bundle.data['author'] = bundle.obj.author.username return bundle class Meta(BaseMeta): queryset = ChatMessage.objects.select_related().all() resource_name = 'chatmessages' list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get', 'post', 'put', 'delete'] excludes = ['id'] filtering = { 'author': ALL_WITH_RELATIONS, 'url': ALL, 'date': ALL, 'messages': ALL, } def apply_filters(self, request, applicable_filters): base_object_list = super(ChatMessageResource, self).apply_filters( request, applicable_filters) return base_object_list def obj_create(self, bundle, request=None, **kwargs): val = None try: bundle.data['date'] = datetime.datetime.strptime( bundle.data['date']['_d'], '%Y-%m-%dT%H:%M:%S.%fZ') val = super(ChatMessageResource, self).obj_create( bundle, request, **kwargs) notify_message(chat=val.obj) except Exception, e: logger.exception(e) return val
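
# --- Illustrative sketch (not part of this module) ---
# These resources are typically exposed through a tastypie Api instance in
# the project's URLconf, roughly as follows (the 'v1' prefix and the exact
# set of registered resources are assumptions):
#
#     from tastypie.api import Api
#
#     v1_api = Api(api_name='v1')
#     v1_api.register(UserResource())
#     v1_api.register(EyeHistoryResource())
#     v1_api.register(ChatMessageResource())
#     # urlpatterns += [url(r'^api/', include(v1_api.urls))]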
# Copyright (c) 2020-2021, Manfred Moitzi
# License: MIT License
"""
This module provides "nested Polygon" detection for multiple paths.

Terminology
-----------

exterior
    creates a filled area, has counter-clockwise (ccw) winding in matplotlib
    exterior := Path

hole
    creates an unfilled area, has clockwise winding (cw) in matplotlib
    hole := Polygon

polygon
    list of nested paths:
    polygon without a hole: [path]
    polygon with 1 hole: [path, [path]]
    polygon with 2 separated holes: [path, [path], [path]]
    polygon with 2 nested holes: [path, [path, [path]]]

    polygon := [exterior, hole*]

The result is a list of polygons:

1 polygon returns: [[ext-path]]
2 separated polygons return: [[ext-path], [ext-path, [hole-path]]]

A hole is just another polygon; for a correct visualisation in matplotlib
the winding of the nested paths has to follow the alternating order
ccw-cw-ccw-cw...:

[Exterior-ccw,
    [Hole-Exterior-cw,
        [Sub-Hole-ccw],
        [Sub-Hole-ccw],
    ],
    [Hole-Exterior-cw],
    [Hole-Exterior-cw],
]

The implementation has to do some expensive tests, like checking whether a
path is inside another path or whether paths overlap. The goal is to reduce
these costs by using proxy objects:

Bounding Box Proxy
------------------

Use the bounding box: this is very fast but not accurate, yet it handles
most real-world scenarios, under the assumption that most HATCHES are
created from non-overlapping boundary paths.
Overlap detection and resolving is not possible.

Bounding Box Construction:
- Fast: use bounding box from control vertices
- Accurate: use bounding box from flattened curve

Inside Check:
- Fast: center point of the bounding box
- Slow: use all corner points of the bounding box

Convex Hull Proxy
-----------------

Use the convex hull of the path: this is more accurate but also much slower.
Overlap detection and resolving is not possible.

Convex Hull construction:
- Fast: use convex hull from control vertices
- Accurate: use convex hull from flattened curve

Inside Check:
- Fast: center point of convex hull
- Slow: use all points of the convex hull

Flattened Curve
---------------

Use the flattened curve vertices: this is the most accurate solution and
also the slowest. Overlap detection and resolving is possible: the exterior
is the union of two overlapping paths, the hole is the intersection of
these two paths, and the hole vertices have to be subtracted from the
exterior vertices.

Sort by Area
------------

It is not possible for a path to contain another path with a larger area.

"""
from typing import Tuple, Optional, List, Iterable, TypeVar
from collections import namedtuple
from .path import Path
from ezdxf.math import BoundingBox2d

__all__ = [
    "fast_bbox_detection",
    "winding_deconstruction",
    "group_paths",
    "flatten_polygons",
]

Exterior = Path
Polygon = TypeVar("Polygon")
Polygon = Tuple[Exterior, Optional[List[Polygon]]]  # type: ignore
BoxStruct = namedtuple("BoxStruct", "bbox, path")


def fast_bbox_detection(paths: Iterable[Path]) -> List[Polygon]:
    """Create a nested polygon structure from iterable `paths`, using 2D
    bounding boxes as fast detection objects.

    """
    # Implements fast bounding box construction and fast inside check.
def area(item: BoxStruct) -> float: width, height = item.bbox.size return width * height def separate( exterior: BoundingBox2d, candidates: List[BoxStruct] ) -> Tuple[List[BoxStruct], List[BoxStruct]]: holes: List[BoxStruct] = [] outside: List[BoxStruct] = [] for candidate in candidates: # Fast inside check: ( holes if exterior.inside(candidate.bbox.center) else outside ).append(candidate) return holes, outside def polygon_structure(outside: List[BoxStruct]) -> List[List]: polygons = [] while outside: exterior = outside.pop() # path with largest area # Get holes inside of exterior and returns the remaining paths # outside of exterior: holes, outside = separate(exterior.bbox, outside) if holes: # build nested hole structure: # the largest hole could contain the smaller holes, # and so on ... holes = polygon_structure(holes) # type: ignore polygons.append([exterior, *holes]) return polygons def as_nested_paths(polygons) -> List: return [ polygon.path if isinstance(polygon, BoxStruct) else as_nested_paths(polygon) for polygon in polygons ] boxed_paths = [ # Fast bounding box construction: BoxStruct(BoundingBox2d(path.control_vertices()), path) for path in paths if len(path) ] boxed_paths.sort(key=area) return as_nested_paths(polygon_structure(boxed_paths)) def winding_deconstruction( polygons: List[Polygon], ) -> Tuple[List[Path], List[Path]]: """Flatten the nested polygon structure in a tuple of two lists, the first list contains the paths which should be counter-clockwise oriented and the second list contains the paths which should be clockwise oriented. The paths are not converted to this orientation. """ def deconstruct(polygons_, level): for polygon in polygons_: if isinstance(polygon, Path): # level 0 is the list of polygons # level 1 = ccw, 2 = cw, 3 = ccw, 4 = cw, ... (ccw_paths if (level % 2) else cw_paths).append(polygon) else: deconstruct(polygon, level + 1) cw_paths: List[Path] = [] ccw_paths: List[Path] = [] deconstruct(polygons, 0) return ccw_paths, cw_paths def flatten_polygons(polygons: Polygon) -> Iterable[Path]: """Yield a flat representation of the given nested polygons.""" for polygon in polygons: # type: ignore if isinstance(polygon, Path): yield polygon else: yield from flatten_polygons(polygon) # type: ignore def group_paths(paths: Iterable[Path]) -> List[List[Path]]: """Group separated paths and their inner holes as flat lists.""" polygons = fast_bbox_detection(paths) # type: ignore return [list(flatten_polygons(polygon)) for polygon in polygons]
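
# --- Illustrative sketch (not part of this module) ---
# Two axis-aligned squares, the smaller one nested inside the larger one,
# become an exterior with one hole. Path construction via line_to() is
# assumed from ezdxf's Path API:
#
#     def square(x0: float, y0: float, size: float) -> Path:
#         p = Path((x0, y0))
#         p.line_to((x0 + size, y0))
#         p.line_to((x0 + size, y0 + size))
#         p.line_to((x0, y0 + size))
#         p.line_to((x0, y0))
#         return p
#
#     outer, inner = square(0, 0, 10), square(2, 2, 2)
#     nested = fast_bbox_detection([outer, inner])
#     # nested == [[outer, [inner]]]: inner is detected as a hole of outer
#     ccw, cw = winding_deconstruction(nested)
#     assert ccw == [outer] and cw == [inner]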
# -*- coding: utf-8 -*- import os import subprocess import sys import time from contextlib import contextmanager from subprocess import PIPE from genestack.genestack_exceptions import GenestackException from genestack.core_files.genestack_file import File from genestack.environment import PROGRAMS_DIRECTORY from genestack.utils import join_program_path, log_info, log_warning, format_tdelta, deprecated from plumbum import local from plumbum.commands import ExecutionModifier class RUN(ExecutionModifier): """ An execution modifier that runs the given command in the foreground, passing it to the current process `stdout` and `stderr`. Add log markers to `stdout` and `stderr` if ``verbose``. """ __slots__ = ('stdout', 'verbose') def __init__(self, stdout=None, verbose=True): self.stdout = stdout self.verbose = verbose def __rand__(self, cmd): with _print_command_info(cmd, self.verbose): if self.stdout: cmd = cmd > self.stdout cmd(stdout=None, stderr=None) class OUTPUT(ExecutionModifier): """ An execution modifier that runs the given command in the foreground, returns its `stdout` as string and passing its `stderr` to the current process `stderr`. Add log markers to `stdout` and `stderr` if ``verbose``. """ __slots__ = ('verbose',) def __init__(self, verbose=True): self.verbose = verbose def __rand__(self, cmd): with _print_command_info(cmd, True), cmd.bgrun(stdin=None, stdout=PIPE, stderr=None) as p: return p.run()[1] RUN = RUN() OUTPUT = OUTPUT() _toolset_inited = False _toolsets = {} _arguments = [] def _init_toolsets(): global _toolset_inited if not _toolset_inited: params_key = 'genestack:tool.arguments' version_prefix = 'genestack:tool.version:' mi = File().get_metainfo() _arguments.extend(x.value for x in mi.get_value_as_list(params_key)) versions = {k[len(version_prefix):]: mi.get(k).value for k in mi if k.startswith(version_prefix)} _toolsets.update({k: Toolset(k, v, verbose=True) for k, v in versions.items()}) _toolset_inited = True def get_argument_string(): """ Return argument string for CLA that uses only single command line. If more than one command found raises :py:class:`~genestack.GenestackException`, use :py:meth:`get_argument_string_list` in that case :return: argument string :rtype: str """ _init_toolsets() if not _arguments: return '' if len(_arguments) == 1: return _arguments[0] else: raise GenestackException('Too many arguments found, use get_argument_string_list') def get_argument_string_list(): """ Return list of the argument strings. If more than one command found raises :py:class:`~genestack.GenestackException` :return: list of argument strings :rtype: list[str] """ _init_toolsets() return list(_arguments) def _get_tool(toolset, tool, verbose=True): """ Return Tool instance. :type toolset: str :type tool: str :rtype: Tool """ _init_toolsets() if toolset not in _toolsets: raise GenestackException( 'Cannot get version for toolset "%s", ' 'this version should be set in metainfo by application' % toolset) toolset = _toolsets[toolset] toolset.verbose = verbose return toolset.get_tool(tool) @deprecated('use "get_tool" instead') def get_command(toolset, tool, uses=None): return get_tool(toolset, tool, uses=uses) def get_version(toolset): """ Return toolset version. :param toolset: toolset name :type toolset: str :return: toolset version as a string :rtype: str """ _init_toolsets() toolset = _toolsets.get(toolset) return toolset.version def get_tool(toolset, tool, uses=None): """ Return command with path and required environment. 
See plumbum docs for more info http://plumbum.readthedocs.io/en/latest/# :param toolset: toolset name :type toolset: str :param tool: tool name :type tool: str :param uses: list of toolset names to be added to PATH :type uses: list[str] :return: command to run tool :rtype: plumbum.commands.base.BoundEnvCommand | plumbum.machines.LocalCommand """ tool = _get_tool(toolset, tool) cmd = tool.get_tool_command() if uses: # TODO make proper message if toolset is not present path = local.env['PATH'] + ':' + ':'.join([_toolsets[x].get_directory() for x in uses]) cmd = cmd.with_env(PATH=path) return cmd @deprecated('use "get_tool_path" instead') def get_command_path(toolset, tool): return get_tool_path(toolset, tool) def get_tool_path(toolset, tool): """ Return path to tool executable. :param toolset: toolset name :type toolset: str :param tool: tool name :type tool: str :return: """ tool = _get_tool(toolset, tool) return tool.get_executable_path() def get_directory(toolset): """ Return directory where executables are located. :param toolset: toolset name :type toolset: str :return: directory where executables are located :rtype: str """ return _toolsets[toolset].get_directory() @contextmanager def _print_command_info(command, verbose): if verbose: start_message = 'Start: %s' % str(command).replace(PROGRAMS_DIRECTORY + '/', '', 1) log_info(start_message) log_warning(start_message) start = time.time() yield tdelta = format_tdelta(time.time() - start) exit_msg = 'Command run finished, %s elapsed' % tdelta log_info(exit_msg) log_warning(exit_msg) return yield return class CLA(object): def __init__(self, a_file): pass def argument_string(self): return get_argument_string() def argument_string_list(self): return get_argument_string_list() def get_tool(self, toolset, tool, verbose=True): return _get_tool(toolset, tool, verbose=verbose) class Toolset(object): def __init__(self, name, version, verbose=False): self.__name = name self.__version = version self.__directory = join_program_path(name, version) self.verbose = verbose self.path_extras = [] if not os.path.exists(self.__directory): raise GenestackException( 'Tool "%s" with version "%s" is not installed' % (name, version)) self.uses(self) @property def name(self): return self.__name @property def version(self): return self.__version def get_tool(self, name): return Tool(self, name) def get_directory(self): with_bin = os.path.join(self.__directory, 'bin') return with_bin if os.path.exists(with_bin) else self.__directory def uses(self, toolset): self.path_extras.append(toolset.get_directory()) def get_version(self): sys.stderr.write('This method is deprecated, use "version" property\n') return self.__version @property def version(self): return self.__version @property def name(self): return self.__name class Tool(object): def __init__(self, toolset, name): self.__toolset = toolset self.__executable = name if not os.path.exists(self.get_executable_path()): raise GenestackException( 'Executable "%s" not found for tool "%s" with version "%s"' % ( self.__executable, toolset.name, toolset.version)) def get_executable_name(self): return self.__executable def get_executable_path(self): return os.path.join(self.get_directory(), self.__executable) def __log_start(self, arguments): enter_msg = 'Start %s(%s): %s %s' % (self.__toolset.name, self.__toolset.version, self.get_executable_name(), ' '.join(arguments)) log_info(enter_msg) log_warning(enter_msg) self.__start_time = time.time() def __log_finish(self): tdelta = format_tdelta(time.time() - self.__start_time) 
exit_msg = 'Running "%s" finished, %s elapsed\n' % (self.get_executable_name(), tdelta) log_info(exit_msg) log_warning(exit_msg) def run(self, arguments, verbose=None, stdout=None, stderr=None): """ Run tool with arguments. Wait for tool to complete. If the exit code was zero then return, otherwise raise GenestackException. This method is thread safe, except log output. Use `verbose=False` when multiprocessing. :param arguments: command arguments :param verbose: flag to print start and end markers to log, if not specified uses Toolset preferences :return: None """ if verbose is None: verbose = self.__toolset.verbose if verbose: self.__log_start(arguments) try: retcode = subprocess.call(self.__compose_arguments(arguments), stdout=stdout, stderr=stderr, shell=True) if retcode != 0: raise GenestackException( 'Command "%s" returned non-zero exit status %d' % ( self.get_executable_name(), retcode)) finally: if verbose: self.__log_finish() def output(self, arguments, verbose=None, stderr=None): """ Run tool with arguments and return its output as a byte string. If the exit code was non-zero it raises a :py:class:`~genestack.GenestackException`. This method is thread safe, except log output. Use `verbose=False` when multiprocessing. :param arguments: command arguments :param verbose: flag to print start and end markers to log, if not specified uses Toolset preferences :return: output :rtype: str """ if verbose is None: verbose = self.__toolset.verbose if verbose: self.__log_start(arguments) try: return subprocess.check_output(self.__compose_arguments(arguments), shell=True, stderr=stderr ) except subprocess.CalledProcessError as e: print e.output raise GenestackException( 'Command "%s" returned non-zero exit status %d' % ( self.get_executable_name(), e.returncode)) finally: if verbose: self.__log_finish() def __compose_arguments(self, arguments): path_string = ':'.join(self.__toolset.path_extras + ['$PATH']) export_path_string = 'export PATH=%s;' % path_string if self.__executable.endswith('.py'): to_run = 'python ' + self.get_executable_path() else: to_run = self.__executable return ' '.join([export_path_string, to_run] + [str(x) for x in arguments]) def get_directory(self): return self.__toolset.get_directory() def uses(self, toolset): self.__toolset.path_extras.append(toolset.get_directory()) def get_version(self): sys.stderr.write('This method is deprecated, use "version" property\n') return self.__toolset.version @property def version(self): return self.__toolset.version def get_tool_command(self): if self.__executable.endswith('.py'): command = local['python'][self.get_executable_path()] else: with local.env(PATH=self.get_directory()): command = local[self.__executable] return command
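
# --- Illustrative sketch (not part of this module) ---
# Typical usage from an application script; the toolset/tool names below
# ('samtools') are hypothetical and must match the versions recorded in the
# file's metainfo by the application:
#
#     samtools = get_tool('samtools', 'samtools')
#     # Run in the foreground via the plumbum-style RUN modifier:
#     samtools['view', '-b', 'input.sam', '-o', 'output.bam'] & RUN
#     # Or capture stdout as a string via OUTPUT:
#     header = samtools['view', '-H', 'output.bam'] & OUTPUT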
# Bandwidth optimization methods

__author__ = "Taylor Oshan"

import numpy as np
from scipy import linalg
from copy import deepcopy
import copy
from collections import namedtuple


def golden_section(a, c, delta, function, tol, max_iter, int_score=False):
    """
    Golden section search routine

    Method: p212, 9.6.4
    Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).
    Geographically weighted regression: the analysis of spatially varying
    relationships.

    Parameters
    ----------
    a               : float
                      initial max search section value
    c               : float
                      initial min search section value
    delta           : float
                      constant used to determine width of search sections
    function        : function
                      objective function to be evaluated at different
                      section values
    tol             : float
                      tolerance used to determine convergence
    max_iter        : integer
                      maximum iterations if no convergence to tolerance
    int_score       : boolean
                      False for float score, True for integer score

    Returns
    -------
    opt_val         : float
                      optimal value
    opt_score       : kernel
                      optimal score
    output          : list of tuples
                      searching history
    """
    b = a + delta * np.abs(c - a)
    d = c - delta * np.abs(c - a)
    score = 0.0
    diff = 1.0e9
    iters = 0
    output = []
    score_cache = {}
    while np.abs(diff) > tol and iters < max_iter:
        iters += 1
        if int_score:
            b = np.round(b)
            d = np.round(d)

        if b in score_cache:
            score_b = score_cache[b]
        else:
            score_b = function(b)
            score_cache[b] = score_b

        if d in score_cache:
            score_d = score_cache[d]
        else:
            score_d = function(d)
            score_cache[d] = score_d

        if score_b <= score_d:
            opt_val = b
            opt_score = score_b
            c = d
            d = b
            b = a + delta * np.abs(c - a)
            #if int_score:
            #b = np.round(b)
        else:
            opt_val = d
            opt_score = score_d
            a = b
            b = d
            d = c - delta * np.abs(c - a)
            #if int_score:
            #d = np.round(b)

        #if int_score:
        #    opt_val = np.round(opt_val)
        output.append((opt_val, opt_score))
        diff = score_b - score_d
        score = opt_score
    return np.round(opt_val, 2), opt_score, output


def equal_interval(l_bound, u_bound, interval, function, int_score=False):
    """
    Interval search, using interval as stepsize

    Parameters
    ----------
    l_bound         : float
                      initial min search section value
    u_bound         : float
                      initial max search section value
    interval        : float
                      constant used to determine width of search sections
    function        : function
                      objective function to be evaluated at different
                      section values
    int_score       : boolean
                      False for float score, True for integer score

    Returns
    -------
    opt_val         : float
                      optimal value
    opt_score       : kernel
                      optimal score
    output          : list of tuples
                      searching history
    """
    a = l_bound
    c = u_bound
    b = a + interval
    if int_score:
        a = np.round(a, 0)
        c = np.round(c, 0)
        b = np.round(b, 0)

    output = []

    score_a = function(a)
    score_c = function(c)

    output.append((a, score_a))
    output.append((c, score_c))

    if score_a < score_c:
        opt_val = a
        opt_score = score_a
    else:
        opt_val = c
        opt_score = score_c

    while b < c:
        score_b = function(b)

        output.append((b, score_b))

        if score_b < opt_score:
            opt_val = b
            opt_score = score_b
        b = b + interval

    return opt_val, opt_score, output


def multi_bw(init, y, X, n, k, family, tol, max_iter, rss_score, gwr_func,
             bw_func, sel_func, multi_bw_min, multi_bw_max):
    """
    Multiscale GWR bandwidth search procedure using iterative GAM backfitting
    """
    if init is None:
        bw = sel_func(bw_func(y, X))
        optim_model = gwr_func(y, X, bw)
    else:
        optim_model = gwr_func(y, X, init)
    S = optim_model.S
    err = optim_model.resid_response.reshape((-1, 1))
    param = optim_model.params

    R = np.zeros((n, n, k))

    for j in range(k):
        for i in range(n):
            wi = optim_model.W[i].reshape(-1, 1)
            xT = (X * wi).T
            P = linalg.solve(xT.dot(X), xT)
            R[i, :, j] = X[i, j] * P[j]

    XB = np.multiply(param, X)
    if rss_score:
        rss = np.sum((err)**2)
    iters = 0
    scores = []
    delta = 1e6
    BWs
= [] VALs = [] FUNCs = [] try: from tqdm import tqdm #if they have it, let users have a progress bar except ImportError: def tqdm(x): #otherwise, just passthrough the range return x for iters in tqdm(range(1, max_iter+1)): new_XB = np.zeros_like(X) bws = [] vals = [] funcs = [] current_partial_residuals = [] params = np.zeros_like(X) f_XB = XB.copy() f_err = err.copy() for j in range(k): temp_y = XB[:,j].reshape((-1,1)) temp_y = temp_y + err temp_X = X[:,j].reshape((-1,1)) bw_class = bw_func(temp_y, temp_X) funcs.append(bw_class._functions) bw = sel_func(bw_class, multi_bw_min[j], multi_bw_max[j]) optim_model = gwr_func(temp_y, temp_X, bw) Aj = optim_model.S new_Rj = Aj - np.dot(Aj, S) + np.dot(Aj, R[:,:,j]) S = S - R[:,:,j] + new_Rj R[:,:,j] = new_Rj err = optim_model.resid_response.reshape((-1,1)) param = optim_model.params.reshape((-1,)) new_XB[:,j] = optim_model.predy.reshape(-1) bws.append(copy.deepcopy(bw)) params[:,j] = param vals.append(bw_class.bw[1]) current_partial_residuals.append(err.copy()) num = np.sum((new_XB - XB)**2)/n den = np.sum(np.sum(new_XB, axis=1)**2) score = (num/den)**0.5 XB = new_XB if rss_score: predy = np.sum(np.multiply(params, X), axis=1).reshape((-1,1)) new_rss = np.sum((y - predy)**2) score = np.abs((new_rss - rss)/new_rss) rss = new_rss scores.append(copy.deepcopy(score)) delta = score BWs.append(copy.deepcopy(bws)) VALs.append(copy.deepcopy(vals)) FUNCs.append(copy.deepcopy(funcs)) if delta < tol: break opt_bws = BWs[-1] return (opt_bws, np.array(BWs), np.array(scores), params, err, S, R)
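
# --- Illustrative sketch (not part of the search routines above) ---
# The default convergence criterion in multi_bw is a "score of change":
# the root of the mean squared change in the additive terms, scaled by the
# squared magnitude of the fitted surface. A tiny standalone numeric check:
#
#     import numpy as np
#
#     XB_old = np.array([[1.0, 2.0], [3.0, 4.0]])
#     XB_new = XB_old + 0.01                    # small backfitting update
#     n = XB_new.shape[0]
#     num = np.sum((XB_new - XB_old) ** 2) / n
#     den = np.sum(np.sum(XB_new, axis=1) ** 2)
#     score = (num / den) ** 0.5                # compared against `tol`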
#!/usr/bin/python
# -*- coding: utf-8 -*-

from ansible.module_utils.basic import *

import copy
import json
import os
from subprocess import Popen, PIPE, STDOUT
import time

DOCUMENTATION = """
---
module: sdc_pipeline
short_description: Performs common actions on a StreamSets pipeline
description:
  - Performs common actions such as list, status, start, stop, reset,
    import, export, and delete of pipelines in StreamSets Data Collector.
author: "StreamSets, @streamsets"
requirements:
  - StreamSets Data Collector should be installed on the target hosts.
options:
  sdc_dist:
    description:
      - Path where StreamSets Data Collector is installed.
    required: false
    default: SDC_DIST environment variable.
  url:
    description:
      - URL of Data Collector UI.
    required: false
    default: http://localhost:18630
  auth_type:
    description:
      - Authentication type for this instance of Data Collector
    required: false
    default: form
    choices: [none, basic, digest, form]
  user:
    description:
      - Username to authenticate with.
    required: false
    default: admin
  password:
    description:
      - Password to authenticate with.
    required: false
    default: admin
  action:
    description:
      - The action to perform.
    required: true
    choices: [list, status, start, stop, reset, import, export, delete]
  pipeline:
    description:
      - The name of the pipeline to operate on. Not required for 'list'
  src:
    description:
      - Path to JSON file to import. Used only with the 'import' action.
  dest:
    description:
      - Path to JSON file to write. Used only with the 'export' action.
"""

EXAMPLES = """
- name: import a pipeline definition from JSON
  sdc_pipeline:
    action: import
    pipeline: my_pipeline
    src: /tmp/my_pipeline.json

- name: start the imported pipeline
  sdc_pipeline:
    action: start
    pipeline: my_pipeline
"""

actions = {
    'list': ['store', 'list'],
    'status': ['manager', 'status'],
    'start': ['manager', 'start'],
    'stop': ['manager', 'stop'],
    'reset': ['manager', 'reset-origin'],
    'import': ['store', 'import'],
    'export': ['store', 'export'],
    'delete': ['store', 'delete'],
}


def main():
    module = AnsibleModule(
        argument_spec=dict(
            sdc_dist=dict(default=os.environ.get('SDC_DIST')),
            url=dict(default='http://localhost:18630', aliases=['instance']),
            action=dict(required=True),
            pipeline=dict(default=None),
            src=dict(default=None),
            dest=dict(default=None),
            auth_type=dict(default='form'),
            user=dict(default='admin'),
            password=dict(default='admin'),
        ),
        supports_check_mode=True,
    )

    sdc_dist = module.params['sdc_dist']
    url = module.params['url']
    action = module.params['action']
    pipeline = module.params['pipeline']
    src = module.params['src']
    dest = module.params['dest']
    auth_type = module.params['auth_type']
    user = module.params['user']
    password = module.params['password']

    if sdc_dist is not None:
        if not os.path.exists(sdc_dist):
            module.fail_json(msg="Path '%s' does not exist" % sdc_dist)
    else:
        module.fail_json(
            msg="Since the SDC_DIST environment variable is not set, you " +
                "must specify the 'sdc_dist' argument to Ansible."
        )

    if 'list' != action and pipeline is None:
        module.fail_json(
            msg='pipeline must be specified for this action.'
        )

    if 'import' == action and src is None:
        module.fail_json(
            msg='src must be specified when importing a pipeline.'
        )

    if 'export' == action and dest is None:
        module.fail_json(
            msg='dest must be specified when exporting a pipeline.'
        )

    changed = False
    streamsets_cli = os.path.join(sdc_dist, 'bin', 'streamsets')

    args = []
    if pipeline:
        args = args + ['--name', pipeline]
    if src:
        args = args + ['--file', src]
    if dest:
        args = args + ['--file', dest]

    command = build_command(
        [
            streamsets_cli,
            'cli',
            '--auth-type', auth_type,
            '--url', url,
            '--user', user,
            '--password', password,
        ],
        action,
        args
    )

    # Used for result output only
    str_command = ' '.join(command)

    if not module.check_mode:
        p = Popen(
            command,
            shell=False,
            stdin=PIPE,
            stdout=PIPE,
            stderr=STDOUT,
            close_fds=True
        )
        result = p.stdout.read()
        time.sleep(1)

        # The CLI currently always returns exit code 0, so we look for
        # a parseable JSON response to determine success/failure.
        try:
            parsed_result = json.loads(result)
            changed = True
        except ValueError:
            # In some cases we wish to just report that a change wasn't
            # needed.
            parsed_result = result
            if is_skipped(result):
                result = None
            else:
                module.fail_json(msg=result)
    else:
        parsed_result = 'Not run in check mode.'

    module.exit_json(
        changed=changed,
        command=str_command,
        result=parsed_result,
    )


def build_command(base_command, action, args):
    return base_command + actions[action] + args


def is_skipped(result):
    skipped = False
    # Pipeline exists
    if 'CONTAINER_0201' in result:
        skipped = True
    # Pipeline already in desired state
    elif 'CONTAINER_0102' in result or 'CONTAINER_0166' in result:
        skipped = True
    return skipped


if __name__ == '__main__':
    main()
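
# --- Illustrative sketch (not part of this module) ---
# build_command() splices the action's CLI subcommand between the base
# invocation and the per-action arguments; e.g. for a status check
# (install path and pipeline name are hypothetical):
#
#     base = ['/opt/sdc/bin/streamsets', 'cli',
#             '--auth-type', 'form', '--url', 'http://localhost:18630',
#             '--user', 'admin', '--password', 'admin']
#     build_command(base, 'status', ['--name', 'my_pipeline'])
#     # -> [..., 'manager', 'status', '--name', 'my_pipeline']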
"""GANDI Simple-Hosting Configuration for Let's Encrypt.""" # pylint: disable=too-many-lines import logging import os import re import xmlrpclib import tempfile import subprocess import zope.interface from acme import challenges try: from letsencrypt import errors from letsencrypt import interfaces from letsencrypt.plugins import common except ImportError: from certbot import errors from certbot import interfaces from certbot.plugins import common logger = logging.getLogger(__name__) UPSTREAM_URL = 'https://github.com/Gandi/letsencrypt-gandi' GANDI_API_URL = 'https://rpc.gandi.net/xmlrpc/' ACME_BASE_PATH = '.well-known/acme-challenge' HTACCESS_PATCH = """ # Patch for Let's Encrypt RewriteEngine off """ def get_user_environment(): new_env = os.environ.copy() if 'SUDO_USER' in new_env: new_env['HOME'] = os.path.expanduser('~' + new_env['SUDO_USER']) new_env['USER'] = new_env['SUDO_USER'] new_env['USERNAME'] = new_env['SUDO_USER'] return new_env class GandiSHSConfigurator(common.Plugin): # pylint: disable=too-many-instance-attributes,too-many-public-methods """GANDI Simple-Hosting configurator. :ivar config: Configuration. :type config: :class:`~letsencrypt.interfaces.IConfig` """ zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller) zope.interface.classProvides(interfaces.IPluginFactory) description = "Gandi Simple Hosting - Alpha" htaccess_content = None _shs_info = None @classmethod def add_parser_arguments(cls, add): add("api-key", help="GANDI api key.") add("name", help="shs name.") add("vhost", default='default', help="vhost") def __init__(self, *args, **kwargs): """Initialize an SHS Configurator. """ self.version = kwargs.pop("version", None) super(GandiSHSConfigurator, self).__init__(*args, **kwargs) def _api(self): api = xmlrpclib.ServerProxy(GANDI_API_URL) return api @property def shs_info(self): if not hasattr(self, 'api_key'): raise errors.PluginError("Api key is missing") if not hasattr(self, 'shs_name'): raise errors.PluginError("Simple hosting name is missing") if self._shs_info: return self._shs_info api = self._api() list = api.paas.list(self.api_key, {'name': self.shs_name}) if not list: raise errors.PluginError( "Couldn't find any match for {0}".format(self.shs_name)) self._shs_info = api.paas.info(self.api_key, list[0]['id']) return self._shs_info # # Plugin Section # def prepare(self): """Prepare the plugin Get apikey and store in config """ self.api_key = self._api_key_from_args() or\ self._api_key_from_env() or\ self._api_key_from_gandi_cli() if not self.api_key: raise errors.PluginError("Api key is missing, couldn't found from " "neither gandi.cli, environment" "(GANDI_API_KEY), nor --{0}" .format(self.option_name('api-key'))) self.shs_name = self.conf('name') if not self.shs_name: raise errors.PluginError("--{0} is a required parameter," "please provide a valid simple hosting " "name".format(self.option_name('name'))) self.vhost = self.conf('vhost') def _api_key_from_gandi_cli(self): """Got cli? 
grab it https://cli.gandi.net :returns: api key or none :rtype: (None, str) """ logger.info('_api_key_from_gandi_cli') try: from gandi.cli.core.conf import GandiConfig GandiConfig.load_config() return GandiConfig.get('api.key') except ImportError: pass def _api_key_from_env(self): """Looks up key from environment use GANDI_API_KEY :returns: api key or none :rtype: (None, str) """ logger.info('_api_key_from_env') key = os.environ.get('GANDI_API_KEY') if key: if re.match('^[a-zA-Z0-9]{24}$', key): # looks like a gandi api key return key def _api_key_from_args(self): """Looks up key from arguments :returns: api key or none :rtype: (None, str) """ logger.info('_api_key_from_args') return self.conf('api-key') def more_info(self): """Human-readable string to help understand the module""" return ( "Configures GANDI Simple-Hosting to authenticate and install" "HTTPS.{0}Version: {version}".format( os.linesep, version=".".join(str(i) for i in self.version)) ) # # Authenticator Section # def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use """Return list of challenge preferences.""" return [challenges.HTTP01] def perform(self, achalls): """Perform the challenge with a file. """ return [self._perform_single(achall) for achall in achalls] def _lookup_shs(self): paas = self.shs_info return paas['user'], paas['ftp_server'] def _base_path(self): if re.match('^php', self.shs_info['type']): return 'vhosts/{vhost}/htdocs/'.format(vhost=self.vhost) elif re.match('^(python|nodejs)', self.shs_info['type']): return 'vhosts/default' # if ruby return 'vhosts/default/public' def _intermediate_dirs(self): base_path = self._base_path() return [base_path + '/' + dir for dir in [ '', '.well-known/', '.well-known/acme-challenge' ]] def _perform_single(self, achall): response, validation = achall.response_and_validation() path = achall.chall.encode("token") logger.info("Deploying Certificate %s: %s", achall.chall.encode("token"), validation.encode()) user, sftp_url = self._lookup_shs() dirs = self._intermediate_dirs() path = dirs[len(dirs) - 1] destfile = achall.chall.encode("token") try: tmpfile = tempfile.mkstemp(suffix='.letsencrypt.gandi.shs') logger.info("tmpfile = %s", tmpfile) os.write(tmpfile[0], validation.encode()) self._try_shs_auth(user, sftp_url) self._upload_tmpfile( tmpfile[1], user, sftp_url, path, destfile, dirs) self.htaccess_content = self._patch_htaccess( self._base_path(), user, sftp_url) return response finally: os.close(tmpfile[0]) os.remove(tmpfile[1]) def _try_shs_auth(self, user, sftp_url): process = ['sftp', '-o', 'UserKnownHostsFile={home}/.ssh/known_hosts'.format(home=get_user_environment()['HOME']), '{user}@{sftp_url}'.format(user=user, sftp_url=sftp_url)] logger.info("sftp %s", process) sftp = subprocess.Popen(process, stdin=subprocess.PIPE, close_fds=True, env=get_user_environment()) print >> sftp.stdin, 'exit' ret = sftp.wait() if ret != 0: raise errors.PluginError("Couldn't connect to the instance at {url}" .format(url=sftp_url)) def _upload_tmpfile(self, tmpfile, user, sftp_url, path, destfile, mkdir): process = ['sftp', '-b', '-', '-o', 'UserKnownHostsFile={home}/.ssh/known_hosts'.format(home=get_user_environment()['HOME']), '{user}@{sftp_url}'.format(user=user, sftp_url=sftp_url)] logger.info("sftp %s", process) sftp = subprocess.Popen(process, stdin=subprocess.PIPE, close_fds=True, env=get_user_environment()) for p in mkdir: # sftp will abort if any of the following commands fail: # get, put, reget, reput, rename, ln, rm, mkdir, chdir, ls, lchdir, # chmod, chown, 
chgrp, lpwd, df, symlink, and lmkdir. Termination # on error can be suppressed on a command by command basis by # prefixing the command with a '-' character (for example, # -rm /tmp/blah*). print >> sftp.stdin, '-mkdir {path}'.format(path=p) print >> sftp.stdin, 'cd {path}'.format(path=path) print >> sftp.stdin, 'put {tmpfile} {destfile}'.format( tmpfile=tmpfile, destfile=destfile) print >> sftp.stdin, 'chmod 444 {destfile}'.format(destfile=destfile) print >> sftp.stdin, 'exit' ret = sftp.wait() if ret != 0: raise errors.PluginError("Couldn't place file in domain: {0}" .format(path)) def _patch_htaccess(self, path, user, sftp_url): """Create or patch htaccess Add an exclusion for ACME_BASE_PATH :rtype: (None, str) """ content = None process = ['sftp', '-b', '-', '-o', 'UserKnownHostsFile={home}/.ssh/known_hosts'.format(home=get_user_environment()['HOME']), '{user}@{sftp_url}'.format(user=user, sftp_url=sftp_url)] sftp = subprocess.Popen(process, stdin=subprocess.PIPE, close_fds=True, env=get_user_environment()) print >> sftp.stdin, 'cd {path}/.well-known'.format(path=path) try: tmpfile = tempfile.mkstemp(suffix='.letsencrypt.gandi.shs') print >> sftp.stdin, 'get .htaccess {tmpfile}'.format( tmpfile=tmpfile[1]) print >> sftp.stdin, 'exit' sftp.wait() with open(tmpfile[1], 'r') as htaccess: content = htaccess.read() finally: os.close(tmpfile[0]) os.remove(tmpfile[1]) if content: new_content = content + HTACCESS_PATCH else: new_content = HTACCESS_PATCH sftp = subprocess.Popen(process, stdin=subprocess.PIPE, close_fds=True, env=get_user_environment()) print >> sftp.stdin, 'cd {path}/.well-known'.format(path=path) try: # Patch tmpfile = tempfile.mkstemp(suffix='.letsencrypt.gandi.shs') os.write(tmpfile[0], new_content) # Upload with patch print >> sftp.stdin, 'put {tmpfile} .htaccess'.format( tmpfile=tmpfile[1]) print >> sftp.stdin, 'chmod 644 .htaccess' print >> sftp.stdin, 'exit' sftp.wait() finally: os.close(tmpfile[0]) os.remove(tmpfile[1]) return content def _unpatch_htaccess(self, path, user, sftp_url): """Remove patchs from htaccess :rtype: None """ process = ['sftp', '-b', '-', '-o', 'UserKnownHostsFile={home}/.ssh/known_hosts'.format(home=get_user_environment()['HOME']), '{user}@{sftp_url}'.format(user=user, sftp_url=sftp_url)] sftp = subprocess.Popen(process, stdin=subprocess.PIPE, close_fds=True, env=get_user_environment()) if not self.htaccess_content: print >> sftp.stdin, 'cd {path}/.well-known'.format(path=path) print >> sftp.stdin, '-rm .htaccess' print >> sftp.stdin, 'exit' sftp.wait() else: print >> sftp.stdin, 'cd {path}/.well-known'.format(path=path) try: tmpfile = tempfile.mkstemp(suffix='.letsencrypt.gandi.shs') os.write(tmpfile[0], self.htaccess_content) print >> sftp.stdin, 'put {tmpfile} .htaccess'.format( tmpfile=tmpfile[1]) print >> sftp.stdin, 'exit' sftp.wait() finally: os.close(tmpfile[0]) os.remove(tmpfile[1]) def cleanup(self, achalls): """Revert all challenges.""" user, sftp_url = self._lookup_shs() return [self._cleanup_one(achall, user, sftp_url) for achall in achalls] def _cleanup_one(self, achall, user, sftp_url): """Remove one challenge from the sftp server""" self._unpatch_htaccess(self._base_path(), user, sftp_url) dirs = self._intermediate_dirs() dirs.reverse() path = dirs[0] + "/" + achall.chall.encode("token") process = ['sftp', '-b', '-', '-o', 'UserKnownHostsFile={home}/.ssh/known_hosts'.format(home=get_user_environment()['HOME']), '{user}@{sftp_url}'.format(user=user, sftp_url=sftp_url)] logger.info("sftp %s", process) sftp = 
subprocess.Popen(process, stdin=subprocess.PIPE, close_fds=True, env=get_user_environment()) print >> sftp.stdin, 'rm {path}'.format(path=path) for p in dirs: # sftp will abort if any of the following commands fail: # get, put, reget, reput, rename, ln, rm, mkdir, chdir, ls, lchdir, # chmod, chown, chgrp, lpwd, df, symlink, and lmkdir. Termination # on error can be suppressed on a command by command basis by # prefixing the command with a '-' character (for example, # -rm /tmp/blah*). print >> sftp.stdin, 'rmdir {path}'.format(path=p) print >> sftp.stdin, 'exit' sftp.wait() # # Installer Section # def get_all_names(self): """Returns all names that may be authenticated. :rtype: `list` of `str` """ return [self.vhost] def deploy_cert(self, domain, cert_path, key_path, chain_path, fullchain_path): """Deploy certificate. :param str domain: domain to deploy certificate file :param str cert_path: absolute path to the certificate file :param str key_path: absolute path to the private key file :param str chain_path: absolute path to the certificate chain file :param str fullchain_path: absolute path to the certificate fullchain file (cert plus chain) :raises .PluginError: when cert cannot be deployed """ api = self._api() with open(cert_path, 'r') as cert: with open(key_path, 'r') as key: api.cert.hosted.create(self.api_key, { 'key': key.read(), 'crt': cert.read() }) def enhance(self, domain, enhancement, options=None): """Perform a configuration enhancement. :param str domain: domain for which to provide enhancement :param str enhancement: An enhancement as defined in :const:`~letsencrypt.constants.ENHANCEMENTS` :param options: Flexible options parameter for enhancement. Check documentation of :const:`~letsencrypt.constants.ENHANCEMENTS` for expected options for each enhancement. :raises .PluginError: If Enhancement is not supported, or if an error occurs during the enhancement. """ raise errors.PluginError( "Unsupported enhancement: {0}".format(enhancement)) def supported_enhancements(self): """Returns a list of supported enhancements. :returns: supported enhancements which should be a subset of :const:`~letsencrypt.constants.ENHANCEMENTS` :rtype: :class:`list` of :class:`str` """ return [] def get_all_certs_keys(self): """Retrieve all certs and keys set in configuration. :returns: tuples with form `[(cert, key, path)]`, where: - `cert` - str path to certificate file - `key` - str path to associated key file - `path` - file path to configuration file :rtype: list """ # TODO def save(self, title=None, temporary=False): """Saves all changes to the configuration files. Both title and temporary are needed because a save may be intended to be permanent, but the save is not ready to be a full checkpoint. If an exception is raised, it is assumed a new checkpoint was not created. :param str title: The title of the save. If a title is given, the configuration will be saved as a new checkpoint and put in a timestamped directory. `title` has no effect if temporary is true. :param bool temporary: Indicates whether the changes made will be quickly reversed in the future (challenges) :raises .PluginError: when save is unsuccessful """ # TODO def rollback_checkpoints(self, rollback=1): """Revert `rollback` number of configuration checkpoints. :raises .PluginError: when configuration cannot be fully reverted """ # TODO def recovery_routine(self): """Revert configuration to most recent finalized checkpoint. Remove all changes (temporary and permanent) that have not been finalized. 
This is useful to protect against crashes and other execution interruptions. :raises .errors.PluginError: If unable to recover the configuration """ # TODO def view_config_changes(self): """Display all of the LE config changes. :raises .PluginError: when config changes cannot be parsed """ pass def config_test(self): """Make sure the configuration is valid. :raises .MisconfigurationError: when the config is not in a usable state """ pass def restart(self): """Restart or refresh the server content. :raises .PluginError: when server cannot be restarted """ pass # No restart can be implemented for web-accelerator
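# A minimal, self-contained sketch of the batch-mode sftp pattern used by the
# plugin above: commands are written to the stdin of ``sftp -b -`` and, as the
# comments above note, prefixing a command with '-' keeps a failure (for
# example, a directory that already exists) from aborting the batch.  The
# user, host, and paths in the commented call are hypothetical placeholders.
import subprocess


def run_sftp_batch(user, host, commands):
    """Run a list of sftp commands in one batch session and return the exit code."""
    proc = subprocess.Popen(['sftp', '-b', '-', '{0}@{1}'.format(user, host)],
                            stdin=subprocess.PIPE, universal_newlines=True)
    for command in commands:
        proc.stdin.write(command + '\n')
    proc.stdin.write('exit\n')
    proc.stdin.close()
    return proc.wait()

# Example (hypothetical values):
# run_sftp_batch('user', 'sftp.example.net', [
#     '-mkdir htdocs/.well-known',
#     'put /tmp/token htdocs/.well-known/acme-challenge/token',
#     'chmod 444 htdocs/.well-known/acme-challenge/token',
# ])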
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """The Gumbel distribution class.""" # Dependency imports import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.bijectors import gumbel_cdf as gumbel_cdf_bijector from tensorflow_probability.python.bijectors import identity as identity_bijector from tensorflow_probability.python.bijectors import invert as invert_bijector from tensorflow_probability.python.bijectors import softplus as softplus_bijector from tensorflow_probability.python.distributions import kullback_leibler from tensorflow_probability.python.distributions import transformed_distribution from tensorflow_probability.python.distributions import uniform from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import parameter_properties from tensorflow_probability.python.internal import tensor_util class Gumbel(transformed_distribution.TransformedDistribution): """The scalar Gumbel distribution with location `loc` and `scale` parameters. #### Mathematical details The probability density function (pdf) of this distribution is, ```none pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma)) / sigma ``` where `loc = mu` and `scale = sigma`. The cumulative density function of this distribution is, ```none cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma)) ``` The Gumbel distribution is a member of the [location-scale family]( https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ Gumbel(loc=0, scale=1) Y = loc + scale * X ``` #### Examples Examples of initialization of one or a batch of distributions. ```python tfd = tfp.distributions # Define a single scalar Gumbel distribution. dist = tfd.Gumbel(loc=0., scale=3.) # Evaluate the cdf at 1, returning a scalar. dist.cdf(1.) # Define a batch of two scalar valued Gumbels. # The first has mean 1 and scale 11, the second 2 and 22. dist = tfd.Gumbel(loc=[1, 2.], scale=[11, 22.]) # Evaluate the pdf of the first distribution on 0, and the second on 1.5, # returning a length two tensor. dist.prob([0, 1.5]) # Get 3 samples, returning a 3 x 2 tensor. dist.sample([3]) ``` Arguments are broadcast when possible. ```python # Define a batch of two scalar valued Logistics. # Both have mean 1, but different scales. dist = tfd.Gumbel(loc=1., scale=[11, 22.]) # Evaluate the pdf of both distributions on the same point, 3.0, # returning a length 2 tensor. dist.prob(3.0) ``` """ def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Gumbel'): """Construct Gumbel distributions with location and scale `loc` and `scale`. The parameters `loc` and `scale` must be shaped in a way that supports broadcasting (e.g. `loc + scale` is a valid operation). Args: loc: Floating point tensor, the means of the distribution(s). scale: Floating point tensor, the scales of the distribution(s). 
scale must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. Default value: `False`. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value `NaN` to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. Default value: `True`. name: Python `str` name prefixed to Ops created by this class. Default value: `'Gumbel'`. Raises: TypeError: if loc and scale are different dtypes. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32) loc = tensor_util.convert_nonref_to_tensor( loc, name='loc', dtype=dtype) scale = tensor_util.convert_nonref_to_tensor( scale, name='scale', dtype=dtype) dtype_util.assert_same_float_dtype([loc, scale]) # Positive scale is asserted by the incorporated Gumbel bijector. self._gumbel_bijector = gumbel_cdf_bijector.GumbelCDF( loc=loc, scale=scale, validate_args=validate_args) # Because the uniform sampler generates samples in `[0, 1)` this would # cause samples to lie in `(inf, -inf]` instead of `(inf, -inf)`. To fix # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype).tiny` # because it is the smallest, positive, 'normal' number. super(Gumbel, self).__init__( distribution=uniform.Uniform( low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny, high=tf.ones([], dtype=dtype), allow_nan_stats=allow_nan_stats), # The Gumbel bijector encodes the CDF function as the forward, # and hence needs to be inverted. bijector=invert_bijector.Invert( self._gumbel_bijector, validate_args=validate_args), parameters=parameters, name=name) @classmethod def _parameter_properties(cls, dtype, num_classes=None): # pylint: disable=g-long-lambda return dict( loc=parameter_properties.ParameterProperties(), scale=parameter_properties.ParameterProperties( default_constraining_bijector_fn=( lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype))))) # pylint: enable=g-long-lambda @property def loc(self): """Distribution parameter for the location.""" return self._gumbel_bijector.loc @property def scale(self): """Distribution parameter for scale.""" return self._gumbel_bijector.scale experimental_is_sharded = False def _entropy(self): # Use broadcasting rules to calculate the full broadcast sigma. scale = self.scale * tf.ones_like(self.loc) return 1. + tf.math.log(scale) + np.euler_gamma def _log_prob(self, x): scale = tf.convert_to_tensor(self.scale) z = (x - self.loc) / scale return -(z + tf.exp(-z)) - tf.math.log(scale) def _mean(self): return self.loc + self.scale * np.euler_gamma def _stddev(self): return self.scale * tf.ones_like(self.loc) * np.pi / np.sqrt(6) def _mode(self): return self.loc * tf.ones_like(self.scale) def _default_event_space_bijector(self): # TODO(b/145620027) Finalize choice of bijector. Consider switching to # Chain([Softplus(), Log()]) to lighten the doubly-exponential right tail. 
return identity_bijector.Identity(validate_args=self.validate_args) def _parameter_control_dependencies(self, is_init): return self._gumbel_bijector._parameter_control_dependencies(is_init) # pylint: disable=protected-access @kullback_leibler.RegisterKL(Gumbel, Gumbel) def _kl_gumbel_gumbel(a, b, name=None): """Calculate the batched KL divergence KL(a || b) with a and b Gumbel. Args: a: instance of a Gumbel distribution object. b: instance of a Gumbel distribution object. name: (optional) Name to use for created operations. default is 'kl_gumbel_gumbel'. Returns: Batchwise KL(a || b) """ with tf.name_scope(name or 'kl_gumbel_gumbel'): # Consistent with # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 64 # The paper uses beta to refer to scale and mu to refer to loc. # There is actually an error in the solution as printed; this is based on # the second-to-last step of the derivation. The value as printed would be # off by (a.loc - b.loc) / b.scale. a_loc = tf.convert_to_tensor(a.loc) b_loc = tf.convert_to_tensor(b.loc) a_scale = tf.convert_to_tensor(a.scale) b_scale = tf.convert_to_tensor(b.scale) return (tf.math.log(b_scale) - tf.math.log(a_scale) + np.euler_gamma * (a_scale / b_scale - 1.) + tf.math.expm1((b_loc - a_loc) / b_scale + tf.math.lgamma(a_scale / b_scale + 1.)) + (a_loc - b_loc) / b_scale)
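# A quick numerical sanity check (not part of the library) for the closed-form
# KL registered above: compare it against the Monte Carlo estimate
# E_a[log p_a(X) - log p_b(X)].  The parameter values and sample size are
# arbitrary illustration choices.
if __name__ == '__main__':
  a = Gumbel(loc=0.5, scale=1.5)
  b = Gumbel(loc=-1.0, scale=2.0)
  analytic_kl = kullback_leibler.kl_divergence(a, b)
  x = a.sample(100000, seed=42)
  monte_carlo_kl = tf.reduce_mean(a.log_prob(x) - b.log_prob(x))
  # The two values should agree to within Monte Carlo error.
  print(analytic_kl.numpy(), monte_carlo_kl.numpy())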
# office/controllers.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- from .models import ContestOfficeListManager, ContestOfficeManager, CONTEST_OFFICE_UNIQUE_IDENTIFIERS, ContestOffice from ballot.controllers import move_ballot_items_to_another_office from ballot.models import OFFICE from bookmark.models import BookmarkItemList from candidate.controllers import move_candidates_to_another_office from config.base import get_environment_variable from django.contrib import messages from django.http import HttpResponse import json from position.controllers import move_positions_to_another_office, update_all_position_details_from_contest_office import requests import wevote_functions.admin from wevote_functions.functions import positive_value_exists, process_request_from_master logger = wevote_functions.admin.get_logger(__name__) WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY") OFFICES_SYNC_URL = get_environment_variable("OFFICES_SYNC_URL") # officesSyncOut def add_contest_office_name_to_next_spot(contest_office_to_update, google_civic_office_name_to_add): if not positive_value_exists(google_civic_office_name_to_add): return contest_office_to_update if not positive_value_exists(contest_office_to_update.google_civic_office_name): contest_office_to_update.google_civic_office_name = google_civic_office_name_to_add elif google_civic_office_name_to_add == contest_office_to_update.google_civic_office_name: # The value is already stored in contest_office_to_update.google_civic_office_name so doesn't need # to be added to contest_office_to_update.google_civic_office_name2 pass elif not positive_value_exists(contest_office_to_update.google_civic_office_name2): contest_office_to_update.google_civic_office_name2 = google_civic_office_name_to_add elif google_civic_office_name_to_add == contest_office_to_update.google_civic_office_name2: # The value is already stored in contest_office_to_update.google_civic_office_name2 so doesn't need # to be added to contest_office_to_update.google_civic_office_name3 pass elif not positive_value_exists(contest_office_to_update.google_civic_office_name3): contest_office_to_update.google_civic_office_name3 = google_civic_office_name_to_add elif google_civic_office_name_to_add == contest_office_to_update.google_civic_office_name3: # The value is already stored in contest_office_to_update.google_civic_office_name2 so doesn't need # to be added to contest_office_to_update.google_civic_office_name3 pass elif not positive_value_exists(contest_office_to_update.google_civic_office_name4): contest_office_to_update.google_civic_office_name4 = google_civic_office_name_to_add elif google_civic_office_name_to_add == contest_office_to_update.google_civic_office_name4: # The value is already stored in contest_office_to_update.google_civic_office_name2 so doesn't need # to be added to contest_office_to_update.google_civic_office_name3 pass elif not positive_value_exists(contest_office_to_update.google_civic_office_name5): contest_office_to_update.google_civic_office_name5 = google_civic_office_name_to_add # We currently only support 5 alternate names return contest_office_to_update def offices_import_from_sample_file(): """ Get the json data, and either create new entries or update existing :return: """ with open("office/import_data/contest_office_sample.json") as json_data: structured_json = json.load(json_data) return offices_import_from_structured_json(structured_json) def offices_import_from_master_server(request, google_civic_election_id='', 
state_code=''): """ Get the json data, and either create new entries or update existing :return: """ # Request json file from We Vote servers import_results, structured_json = process_request_from_master( request, "Loading Contest Offices from We Vote Master servers", OFFICES_SYNC_URL, { "key": WE_VOTE_API_KEY, "google_civic_election_id": str(google_civic_election_id), "state_code": state_code, } ) if import_results['success']: results = filter_offices_structured_json_for_local_duplicates(structured_json) filtered_structured_json = results['structured_json'] duplicates_removed = results['duplicates_removed'] import_results = offices_import_from_structured_json(filtered_structured_json) import_results['duplicates_removed'] = duplicates_removed return import_results def fetch_duplicate_office_count(contest_office, ignore_office_we_vote_id_list): if not hasattr(contest_office, 'google_civic_election_id'): return 0 if not positive_value_exists(contest_office.google_civic_election_id): return 0 # Search for other offices within this election that match name and election contest_office_list_manager = ContestOfficeListManager() return contest_office_list_manager.fetch_offices_from_non_unique_identifiers_count( contest_office.google_civic_election_id, contest_office.state_code, contest_office.office_name, ignore_office_we_vote_id_list) def find_duplicate_contest_office(contest_office, ignore_office_we_vote_id_list): if not hasattr(contest_office, 'google_civic_election_id'): error_results = { 'success': False, 'status': "FIND_DUPLICATE_CONTEST_OFFICE_MISSING_OFFICE_OBJECT ", 'contest_office_merge_possibility_found': False, 'contest_office_merge_conflict_values': {}, 'contest_office_list': [], } return error_results if not positive_value_exists(contest_office.google_civic_election_id): error_results = { 'success': False, 'status': "FIND_DUPLICATE_CONTEST_OFFICE_MISSING_GOOGLE_CIVIC_ELECTION_ID ", 'contest_office_merge_possibility_found': False, 'contest_office_merge_conflict_values': {}, 'contest_office_list': [], } return error_results # Search for other contest offices within this election that match name and election contest_office_list_manager = ContestOfficeListManager() try: results = contest_office_list_manager.retrieve_contest_offices_from_non_unique_identifiers( contest_office.office_name, contest_office.google_civic_election_id, contest_office.state_code, contest_office.district_id, contest_office.district_name, contest_office.ballotpedia_race_id, ignore_office_we_vote_id_list) if results['contest_office_found']: contest_office_merge_conflict_values = \ figure_out_office_conflict_values(contest_office, results['contest_office']) results = { 'success': True, 'status': "FIND_DUPLICATE_CONTEST_OFFICE_DUPLICATES_FOUND", 'contest_office_merge_possibility_found': True, 'contest_office_merge_possibility': results['contest_office'], 'contest_office_merge_conflict_values': contest_office_merge_conflict_values, 'contest_office_list': results['contest_office_list'], } return results elif results['contest_office_list_found']: # Only deal with merging the incoming contest office and the first on found contest_office_merge_conflict_values = \ figure_out_office_conflict_values(contest_office, results['contest_office_list'][0]) results = { 'success': True, 'status': "FIND_DUPLICATE_CONTEST_OFFICE_DUPLICATES_FOUND", 'contest_office_merge_possibility_found': True, 'contest_office_merge_possibility': results['contest_office_list'][0], 'contest_office_merge_conflict_values': 
contest_office_merge_conflict_values, 'contest_office_list': results['contest_office_list'], } return results else: results = { 'success': True, 'status': "FIND_DUPLICATE_CONTEST_OFFICE_NO_DUPLICATES_FOUND", 'contest_office_merge_possibility_found': False, 'contest_office_merge_conflict_values': {}, 'contest_office_list': results['contest_office_list'], } return results except ContestOffice.DoesNotExist: pass except Exception as e: pass results = { 'success': True, 'status': "FIND_DUPLICATE_CONTEST_OFFICE_NO_DUPLICATES_FOUND", 'contest_office_merge_possibility_found': False, } return results def figure_out_office_conflict_values(contest_office1, contest_office2): contest_office_merge_conflict_values = {} for attribute in CONTEST_OFFICE_UNIQUE_IDENTIFIERS: try: contest_office1_attribute = getattr(contest_office1, attribute) contest_office2_attribute = getattr(contest_office2, attribute) if contest_office1_attribute is None and contest_office2_attribute is None: contest_office_merge_conflict_values[attribute] = 'MATCHING' elif contest_office1_attribute is None or contest_office1_attribute is "": if attribute == "maplight_id": if contest_office2_attribute is None or contest_office2_attribute is "" \ or contest_office2_attribute is 0 or contest_office2_attribute is '0': # In certain cases (like maplight_id) we don't want to copy over empty maplight_id contest_office_merge_conflict_values[attribute] = 'MATCHING' else: contest_office_merge_conflict_values[attribute] = 'CONTEST_OFFICE2' else: contest_office_merge_conflict_values[attribute] = 'CONTEST_OFFICE2' elif contest_office2_attribute is None or contest_office2_attribute is "": contest_office_merge_conflict_values[attribute] = 'CONTEST_OFFICE1' else: if attribute == "office_name" or attribute == "state_code": if contest_office1_attribute.lower() == contest_office2_attribute.lower(): contest_office_merge_conflict_values[attribute] = 'MATCHING' else: contest_office_merge_conflict_values[attribute] = 'CONFLICT' elif attribute == "maplight_id": contest_office1_attribute_empty = False contest_office2_attribute_empty = False if not contest_office1_attribute or contest_office1_attribute == 0 \ or contest_office1_attribute is None: contest_office1_attribute_empty = True if not contest_office2_attribute or contest_office2_attribute == 0 \ or contest_office2_attribute is None: contest_office1_attribute_empty = True if contest_office1_attribute == contest_office2_attribute: contest_office_merge_conflict_values[attribute] = 'MATCHING' elif contest_office1_attribute_empty and contest_office2_attribute_empty: contest_office_merge_conflict_values[attribute] = 'MATCHING' else: contest_office_merge_conflict_values[attribute] = 'CONFLICT' else: if contest_office1_attribute == contest_office2_attribute: contest_office_merge_conflict_values[attribute] = 'MATCHING' else: contest_office_merge_conflict_values[attribute] = 'CONFLICT' except AttributeError: pass return contest_office_merge_conflict_values def merge_if_duplicate_offices(office1_on_stage, office2_on_stage, conflict_values): status = "MERGE_IF_DUPLICATE_OFFICES " offices_merged = False decisions_required = False office1_we_vote_id = office1_on_stage.we_vote_id office2_we_vote_id = office2_on_stage.we_vote_id # Are there any comparisons that require admin intervention? 
merge_choices = {} for attribute in CONTEST_OFFICE_UNIQUE_IDENTIFIERS: conflict_value = conflict_values.get(attribute, None) if conflict_value == "CONFLICT": decisions_required = True break elif conflict_value == "OFFICE2": merge_choices[attribute] = getattr(office2_on_stage, attribute) if decisions_required: success = True status += "DECISION_REQUIRED " else: status += "NO_DECISIONS_REQUIRED " merge_results = merge_these_two_offices(office1_we_vote_id, office2_we_vote_id, merge_choices, office1_on_stage, office2_on_stage) if merge_results['offices_merged']: success = True offices_merged = True else: success = False status += merge_results['status'] results = { 'success': success, 'status': status, 'offices_merged': offices_merged, 'decisions_required': decisions_required, 'office': office1_on_stage, } return results def merge_these_two_offices(contest_office1_we_vote_id, contest_office2_we_vote_id, admin_merge_choices={}, contest_office1_on_stage=None, contest_office2_on_stage=None): """ Process the merging of two offices. Note that this is similar to office/views_admin.py "office_merge_process_view" :param contest_office1_we_vote_id: :param contest_office2_we_vote_id: :param admin_merge_choices: Dictionary with the attribute name as the key, and the chosen value as the value :param contest_office1_on_stage: The first office object if we have it :param contest_office2_on_stage: The second office object if we have it :return: """ status = "" office_manager = ContestOfficeManager() if contest_office1_on_stage and contest_office1_on_stage.we_vote_id: contest_office1_id = contest_office1_on_stage.id contest_office1_we_vote_id = contest_office1_on_stage.we_vote_id else: # Candidate 1 is the one we keep, and Candidate 2 is the one we will merge into Candidate 1 contest_office1_results = \ office_manager.retrieve_contest_office_from_we_vote_id(contest_office1_we_vote_id) if contest_office1_results['contest_office_found']: contest_office1_on_stage = contest_office1_results['contest_office'] contest_office1_id = contest_office1_on_stage.id else: results = { 'success': False, 'status': "MERGE_THESE_TWO_OFFICES-COULD_NOT_RETRIEVE_OFFICE1 ", 'offices_merged': False, 'office': None, } return results if contest_office2_on_stage and contest_office2_on_stage.we_vote_id: contest_office2_id = contest_office2_on_stage.id contest_office2_we_vote_id = contest_office2_on_stage.we_vote_id else: contest_office2_results = \ office_manager.retrieve_contest_office_from_we_vote_id(contest_office2_we_vote_id) if contest_office2_results['contest_office_found']: contest_office2_on_stage = contest_office2_results['contest_office'] contest_office2_id = contest_office2_on_stage.id else: results = { 'success': False, 'status': "MERGE_THESE_TWO_OFFICES-COULD_NOT_RETRIEVE_OFFICE2 ", 'offices_merged': False, 'office': None, } return results # TODO: Migrate bookmarks - for now stop the merge process if there are bookmarks bookmark_item_list_manager = BookmarkItemList() bookmark_results = bookmark_item_list_manager.retrieve_bookmark_item_list_for_contest_office( contest_office2_we_vote_id) if bookmark_results['bookmark_item_list_found']: status += "Bookmarks found for Contest Office 2 - automatic merge not working yet." 
results = { 'success': False, 'status': status, 'offices_merged': False, 'office': None, } return results # Merge attribute values chosen by the admin for attribute in CONTEST_OFFICE_UNIQUE_IDENTIFIERS: if attribute in admin_merge_choices: setattr(contest_office1_on_stage, attribute, admin_merge_choices[attribute]) # Preserve unique google_civic_office_name, _name2, _name3, _name4, and _name5 if positive_value_exists(contest_office2_on_stage.google_civic_office_name): contest_office1_on_stage = add_contest_office_name_to_next_spot( contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name) if positive_value_exists(contest_office2_on_stage.google_civic_office_name2): contest_office1_on_stage = add_contest_office_name_to_next_spot( contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name2) if positive_value_exists(contest_office2_on_stage.google_civic_office_name3): contest_office1_on_stage = add_contest_office_name_to_next_spot( contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name3) if positive_value_exists(contest_office2_on_stage.google_civic_office_name4): contest_office1_on_stage = add_contest_office_name_to_next_spot( contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name4) if positive_value_exists(contest_office2_on_stage.google_civic_office_name5): contest_office1_on_stage = add_contest_office_name_to_next_spot( contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name5) # Now move candidates attached to this office from_contest_office_id = contest_office2_on_stage.id from_contest_office_we_vote_id = contest_office2_on_stage.we_vote_id to_contest_office_id = contest_office1_on_stage.id to_contest_office_we_vote_id = contest_office1_on_stage.we_vote_id updated_contest_office = contest_office1_on_stage results = move_candidates_to_another_office(from_contest_office_id, from_contest_office_we_vote_id, to_contest_office_id, to_contest_office_we_vote_id, updated_contest_office) if not positive_value_exists(results['success']): results = { 'success': False, 'status': "MERGE_THESE_TWO_OFFICES-COULD_NOT_MOVE_CANDIDATES_TO_OFFICE1 ", 'offices_merged': False, 'office': None, } return results # TODO: Merge quick_info's office details in future # Merge ballot item's office details ballot_items_results = move_ballot_items_to_another_office(contest_office2_id, contest_office2_we_vote_id, contest_office1_id, contest_office1_we_vote_id, contest_office1_on_stage) if not ballot_items_results['success']: status += ballot_items_results['status'] results = { 'success': False, 'status': status, 'offices_merged': False, 'office': updated_contest_office, } return results # Merge public positions public_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id, contest_office1_id, contest_office1_we_vote_id, True) if not public_positions_results['success']: status += public_positions_results['status'] results = { 'success': False, 'status': status, 'offices_merged': False, 'office': updated_contest_office, } return results # Merge friends-only positions friends_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id, contest_office1_id, contest_office1_we_vote_id, False) if not friends_positions_results['success']: status += friends_positions_results['status'] results = { 'success': False, 'status': status, 'offices_merged': False, 'office': updated_contest_office, } return results # TODO: Migrate images? 
# Note: wait to wrap in try/except block contest_office1_on_stage.save() # There isn't any office data to refresh from other master tables # Remove office 2 contest_office2_on_stage.delete() results = { 'success': True, 'status': status, 'offices_merged': True, 'office': contest_office1_on_stage, } return results def filter_offices_structured_json_for_local_duplicates(structured_json): """ With this function, we remove offices that seem to be duplicates, but have different we_vote_id's :param structured_json: :return: """ office_manager_list = ContestOfficeListManager() duplicates_removed = 0 filtered_structured_json = [] for one_office in structured_json: google_civic_election_id = one_office['google_civic_election_id'] \ if 'google_civic_election_id' in one_office else 0 state_code = one_office['state_code'] if 'state_code' in one_office else '' we_vote_id = one_office['we_vote_id'] if 'we_vote_id' in one_office else '' office_name = one_office['office_name'] if 'office_name' in one_office else '' # district_id = one_office['district_id'] if 'district_id' in one_office else '' # ocd_division_id = one_office['ocd_division_id'] if 'ocd_division_id' in one_office else '' # number_voting_for = one_office['number_voting_for'] if 'number_voting_for' in one_office else '' # number_elected = one_office['number_elected'] if 'number_elected' in one_office else '' # contest_level0 = one_office['contest_level0'] if 'contest_level0' in one_office else '' # contest_level1 = one_office['contest_level1'] if 'contest_level1' in one_office else '' # contest_level2 = one_office['contest_level2'] if 'contest_level2' in one_office else '' # primary_party = one_office['primary_party'] if 'primary_party' in one_office else '' # district_name = one_office['district_name'] if 'district_name' in one_office else '' # district_scope = one_office['district_scope'] if 'district_scope' in one_office else '' # electorate_specifications = one_office['electorate_specifications'] \ # if 'electorate_specifications' in one_office else '' # special = one_office['special'] if 'special' in one_office else '' # maplight_id = one_office['maplight_id'] if 'maplight_id' in one_office else 0 # ballotpedia_id = one_office['ballotpedia_id'] if 'ballotpedia_id' in one_office else '' # wikipedia_id = one_office['wikipedia_id'] if 'wikipedia_id' in one_office else '' # Check to see if there is an entry that matches in all critical ways, minus the we_vote_id we_vote_id_from_master = we_vote_id results = office_manager_list.retrieve_possible_duplicate_offices(google_civic_election_id, state_code, office_name, we_vote_id_from_master) if results['office_list_found']: # There seems to be a duplicate already in this database using a different we_vote_id duplicates_removed += 1 else: filtered_structured_json.append(one_office) offices_results = { 'success': True, 'status': "FILTER_OFFICES_PROCESS_COMPLETE", 'duplicates_removed': duplicates_removed, 'structured_json': filtered_structured_json, } return offices_results def offices_import_from_structured_json(structured_json): office_manager = ContestOfficeManager() offices_saved = 0 offices_updated = 0 offices_not_processed = 0 for one_office in structured_json: google_civic_election_id = one_office['google_civic_election_id'] \ if 'google_civic_election_id' in one_office else 0 we_vote_id = one_office['we_vote_id'] if 'we_vote_id' in one_office else '' if positive_value_exists(google_civic_election_id) and positive_value_exists(we_vote_id): state_code = one_office['state_code'] if 'state_code' 
in one_office else '' district_id = one_office['district_id'] if 'district_id' in one_office else '' office_name = one_office['office_name'] if 'office_name' in one_office else '' google_ballot_placement = one_office['google_ballot_placement'] \ if 'google_ballot_placement' in one_office else '' google_civic_office_name = one_office['google_civic_office_name'] \ if 'google_civic_office_name' in one_office else '' google_civic_office_name2 = one_office['google_civic_office_name2'] \ if 'google_civic_office_name2' in one_office else '' google_civic_office_name3 = one_office['google_civic_office_name3'] \ if 'google_civic_office_name3' in one_office else '' google_civic_office_name4 = one_office['google_civic_office_name4'] \ if 'google_civic_office_name4' in one_office else '' google_civic_office_name5 = one_office['google_civic_office_name5'] \ if 'google_civic_office_name5' in one_office else '' ocd_division_id = one_office['ocd_division_id'] if 'ocd_division_id' in one_office else '' number_voting_for = one_office['number_voting_for'] if 'number_voting_for' in one_office else '' number_elected = one_office['number_elected'] if 'number_elected' in one_office else '' contest_level0 = one_office['contest_level0'] if 'contest_level0' in one_office else '' contest_level1 = one_office['contest_level1'] if 'contest_level1' in one_office else '' contest_level2 = one_office['contest_level2'] if 'contest_level2' in one_office else '' primary_party = one_office['primary_party'] if 'primary_party' in one_office else '' district_name = one_office['district_name'] if 'district_name' in one_office else '' district_scope = one_office['district_scope'] if 'district_scope' in one_office else '' electorate_specifications = one_office['electorate_specifications'] \ if 'electorate_specifications' in one_office else '' special = one_office['special'] if 'special' in one_office else '' maplight_id = one_office['maplight_id'] if 'maplight_id' in one_office else 0 ballotpedia_id = one_office['ballotpedia_id'] if 'ballotpedia_id' in one_office else '' # Equivalent to elected_office ballotpedia_office_id = one_office['ballotpedia_office_id'] if 'ballotpedia_office_id' in one_office else '' ballotpedia_office_name = one_office['ballotpedia_office_name'] \ if 'ballotpedia_office_name' in one_office else '' ballotpedia_office_url = one_office['ballotpedia_office_url'] \ if 'ballotpedia_office_url' in one_office else '' # Equivalent to contest_office ballotpedia_race_id = one_office['ballotpedia_race_id'] if 'ballotpedia_race_id' in one_office else '' ballotpedia_race_office_level = one_office['ballotpedia_race_office_level'] \ if 'ballotpedia_race_office_level' in one_office else '' wikipedia_id = one_office['wikipedia_id'] if 'wikipedia_id' in one_office else '' updated_contest_office_values = { 'we_vote_id': we_vote_id, 'google_civic_election_id': google_civic_election_id, 'state_code': state_code, 'district_id': district_id, 'district_name': district_name, 'office_name': office_name, 'google_ballot_placement': google_ballot_placement, 'google_civic_office_name': google_civic_office_name, 'google_civic_office_name2': google_civic_office_name2, 'google_civic_office_name3': google_civic_office_name3, 'google_civic_office_name4': google_civic_office_name4, 'google_civic_office_name5': google_civic_office_name5, 'ocd_division_id': ocd_division_id, 'number_voting_for': number_voting_for, 'number_elected': number_elected, 'contest_level0': contest_level0, 'contest_level1': contest_level1, 'contest_level2': contest_level2, 
'primary_party': primary_party, 'district_scope': district_scope, 'electorate_specifications': electorate_specifications, 'special': special, 'maplight_id': maplight_id, 'ballotpedia_id': ballotpedia_id, 'ballotpedia_office_id': ballotpedia_office_id, 'ballotpedia_office_name': ballotpedia_office_name, 'ballotpedia_office_url': ballotpedia_office_url, 'ballotpedia_race_id': ballotpedia_race_id, 'ballotpedia_race_office_level': ballotpedia_race_office_level, 'wikipedia_id': wikipedia_id, } results = office_manager.update_or_create_contest_office( we_vote_id, maplight_id, google_civic_election_id, office_name, district_id, updated_contest_office_values) else: offices_not_processed += 1 results = { 'success': False, 'status': 'Required value missing, cannot update or create' } if results['success']: if results['new_office_created']: offices_saved += 1 else: offices_updated += 1 offices_results = { 'success': True, 'status': "OFFICE_IMPORT_PROCESS_COMPLETE", 'saved': offices_saved, 'updated': offices_updated, 'not_processed': offices_not_processed, } return offices_results def office_retrieve_for_api(office_id, office_we_vote_id): """ Used by the api :param office_id: :param office_we_vote_id: :return: """ # NOTE: Office retrieve is independent of *who* wants to see the data. Office retrieve never triggers # a ballot data lookup from Google Civic, like voterBallotItemsFromGoogleCivic does if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id): status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING' json_data = { 'status': status, 'success': False, 'kind_of_ballot_item': OFFICE, 'id': office_id, 'we_vote_id': office_we_vote_id, 'google_civic_election_id': 0, 'state_code': '', } return HttpResponse(json.dumps(json_data), content_type='application/json') office_manager = ContestOfficeManager() if positive_value_exists(office_id): results = office_manager.retrieve_contest_office_from_id(office_id) success = results['success'] status = results['status'] elif positive_value_exists(office_we_vote_id): results = office_manager.retrieve_contest_office_from_we_vote_id(office_we_vote_id) success = results['success'] status = results['status'] else: status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING_2' # It should be impossible to reach this json_data = { 'status': status, 'success': False, 'kind_of_ballot_item': OFFICE, 'id': office_id, 'we_vote_id': office_we_vote_id, 'google_civic_election_id': 0, 'state_code': '', } return HttpResponse(json.dumps(json_data), content_type='application/json') if success: contest_office = results['contest_office'] json_data = { 'status': status, 'success': True, 'kind_of_ballot_item': OFFICE, 'id': contest_office.id, 'we_vote_id': contest_office.we_vote_id, 'google_civic_election_id': contest_office.google_civic_election_id, 'state_code': contest_office.state_code, 'ballot_item_display_name': contest_office.office_name, 'ocd_division_id': contest_office.ocd_division_id, 'maplight_id': contest_office.maplight_id, 'ballotpedia_id': contest_office.ballotpedia_id, 'ballotpedia_office_id': contest_office.ballotpedia_office_id, 'ballotpedia_office_url': contest_office.ballotpedia_office_url, 'ballotpedia_race_id': contest_office.ballotpedia_race_id, 'wikipedia_id': contest_office.wikipedia_id, 'number_voting_for': contest_office.number_voting_for, 'number_elected': contest_office.number_elected, 'primary_party': contest_office.primary_party, 'district_name': contest_office.district_name, } else: json_data = { 'status': status, 'success': 
False,
            'kind_of_ballot_item': OFFICE,
            'id': office_id,
            'we_vote_id': office_we_vote_id,
            'google_civic_election_id': 0,
            'state_code': '',
        }
    return HttpResponse(json.dumps(json_data), content_type='application/json')


def push_contest_office_data_to_other_table_caches(contest_office_id=0, contest_office_we_vote_id=''):
    contest_office_manager = ContestOfficeManager()
    if positive_value_exists(contest_office_we_vote_id):
        results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office_we_vote_id)
    elif positive_value_exists(contest_office_id):
        results = contest_office_manager.retrieve_contest_office_from_id(contest_office_id)
    else:
        # Neither identifier was provided, so avoid referencing results before assignment below
        results = {'contest_office_found': False}

    if results['contest_office_found']:
        contest_office = results['contest_office']
        save_position_from_office_results = update_all_position_details_from_contest_office(contest_office)
        return save_position_from_office_results
    else:
        results = {
            'success': False,
            'positions_updated_count': 0,
            'positions_not_updated_count': 0,
            'update_all_position_results': []
        }
        return results
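# A minimal illustration of how add_contest_office_name_to_next_spot (defined
# above) cascades alternate names into google_civic_office_name through
# google_civic_office_name5, skipping values that are already stored.
# SimpleOffice is a hypothetical stand-in used only for this sketch; the real
# callers pass a ContestOffice model instance.
if __name__ == '__main__':
    class SimpleOffice(object):
        google_civic_office_name = ''
        google_civic_office_name2 = ''
        google_civic_office_name3 = ''
        google_civic_office_name4 = ''
        google_civic_office_name5 = ''

    office = SimpleOffice()
    office = add_contest_office_name_to_next_spot(office, 'Governor')
    office = add_contest_office_name_to_next_spot(office, 'Governor of California')
    office = add_contest_office_name_to_next_spot(office, 'Governor')  # duplicate, ignored
    # office.google_civic_office_name   -> 'Governor'
    # office.google_civic_office_name2  -> 'Governor of California'
    # office.google_civic_office_name3  -> '' (unchanged)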
# --- import -------------------------------------------------------------------------------------- import collections import numpy as np import WrightTools as wt from ..mixed import propagate # --- functions ----------------------------------------------------------------------------------- def do_work(arglist): indices, iprime, H, pulse_class = arglist[:4] efpi, pm, timestep, eb, lb, evolve_func = arglist[4:] # need to declare these for each function pulse_class.early_buffer = eb pulse_class.late_buffer = lb pulse_class.timestep = timestep t, efields = pulse_class.pulse(efpi, pm=pm) out = evolve_func(t, efields, iprime, H) #if indices[-1] == 0: # print(indices, pulse_class.timestep, str(iprime) + ' \r',) return indices, out # --- class --------------------------------------------------------------------------------------- class Scan: def __init__(self, experiment, hamiltonian): self.exp = experiment self.ham = hamiltonian # unpack experiment self.axis_objs = self.exp.active_axes self.pulse_class = self.exp.pulse_class self.cols = self.pulse_class.cols self.npulses = len(self.exp.pulses) self.pm = self.exp.pm self.early_buffer = self.exp.early_buffer self.late_buffer = self.exp.late_buffer self.timestep = self.exp.timestep # initialize self.coords_set = [] self.iprime = np.arange(-self.early_buffer, self.late_buffer, self.timestep).size self.shape = tuple(a.points.size for a in self.exp.active_axes) self.array = np.zeros(self.shape) self.efp = self._gen_efp() def _gen_efp(self, indices=None): """Get an array containing all parameters of efields. Parameters ---------- indicies : array of integers (optional) Specific indicies to look up parameters for. If None, all indicies are looked up. Default is None. Returns ------- numpy ndarray Array in (axes..., pulse, parameter). """ efp = np.zeros(self.shape + (self.npulses, len(self.cols))) for pulse_index in range(self.npulses): axes = [a for a in self.exp.axes if pulse_index in a.pulses] for axis in axes: parameter_index = self.cols.index(axis.parameter) if axis.active: axis_index = self.exp.active_axes.index(axis) points = axis.points.copy() for _ in range(axis_index, len(self.exp.active_axes) - 1): points.shape += (1,) efp[..., pulse_index, parameter_index] = points else: efp[..., pulse_index, parameter_index] = axis.points return efp kernel_cuda_source = """ __global__ void kernel(double time_start, double time_end, double dt, int nEFields, double* efparams, int* phase_matching, int n_recorded, Hamiltonian* ham, pycuda::complex<double>* out) { int idx = threadIdx.x + blockIdx.x * blockDim.x; runge_kutta(time_start, time_end, dt, nEFields, efparams + (idx * 5 * nEFields), phase_matching, n_recorded, *ham, out + (idx * ham->nRecorded * n_recorded)); } """ def run(self, mp='cpu', chunk=False): """Run the scan. Parameters ---------- mp : {False, 'cpu', 'gpu'} (optional) Select multiprocessing: False (or '' or None) means single-threaded. 'gpu' indicates to use the CUDA implementation Any other value which evaluates to ``True`` indicates cpu multiprocessed. Default is 'cpu'. 
Returns numpy ndarray Array in (axes..., outgroups, time) """ shape = list(self.array.shape) shape.append(len(self.ham.recorded_indices)) shape.append(self.iprime) self.pulse_class.timestep = self.timestep self.pulse_class.early_buffer = self.early_buffer self.pulse_class.late_buffer = self.late_buffer self.pulse_class.pm = self.pm self.sig = np.empty(shape, dtype=np.complex128) if mp == 'gpu': from pycuda import driver as cuda from pycuda.compiler import SourceModule from pycuda import autoinit hamPtr = cuda.mem_alloc(self.ham.cuda_mem_size) self.ham.to_device(hamPtr) efpPtr = cuda.to_device(self.efp) pmPtr = cuda.to_device(np.array(self.pm, dtype=np.int32)) sigPtr = cuda.mem_alloc(self.sig.nbytes) d_ind = self.pulse_class.cols.index('d') start = np.min(self.efp[..., d_ind]) - self.early_buffer stop = np.max(self.efp[..., d_ind]) + self.late_buffer mod = SourceModule(self.ham.cuda_struct + self.ham.cuda_matrix_source + propagate.muladd_cuda_source + propagate.dot_cuda_source + propagate.pulse_cuda_source + propagate.runge_kutta_cuda_source + Scan.kernel_cuda_source) kernel = mod.get_function('kernel') kernel(start, stop, np.float64(self.timestep), np.intp(3), efpPtr, pmPtr, np.intp(self.iprime), hamPtr, sigPtr, grid=(self.array.size//256,1), block=(256,1,1)) cuda.memcpy_dtoh(self.sig, sigPtr) elif mp: from multiprocessing import Pool, cpu_count arglist = [[ind, self.iprime, self.ham, self.pulse_class, self.efp[ind], self.pm, self.timestep, self.early_buffer, self.late_buffer, self.ham.propagator] for ind in np.ndindex(self.array.shape)] pool = Pool(processes=cpu_count()) chunksize = int(self.array.size / cpu_count()) #print('chunksize:', chunksize) #with wt.kit.Timer(): results = pool.map(do_work, arglist, chunksize=chunksize) pool.close() pool.join() # now write to the np array for i in range(len(results)): self.sig[results[i][0]] = results[i][1] del results else: #with wt.kit.Timer(): for idx in np.ndindex(self.shape): t, efields = self.pulse_class.pulse(self.efp[idx], pm=self.pm) self.sig[idx] = self.ham.propagator(t, efields, self.iprime, self.ham) return self.sig def get_color(self): """Get an array of driven signal frequency for each array point.""" # in wavenumbers w_axis = self.cols['w'] wtemp = self.efp[..., w_axis].copy() wtemp *= self.pm wm = wtemp.sum(axis=-1) return wm def efields(self, windowed=True): """Return the e-fields used in the simulation. Parameters ---------- windowed : boolean (optional) If True, only returns values that are within the early and late buffer. Default is True. Returns ------- numpy ndarray Array in (axes..., pulse, time). 
""" # [axes..., numpulses, nparams] efp = self.efp # [axes..., numpulses, pulse field values] efields_shape = list(efp.shape) if windowed: efields_shape[-1] = self.iprime efields = np.zeros((efields_shape), dtype=np.complex) with wt.kit.Timer(): for ind in np.ndindex(tuple(efields_shape[:-2])): ti, efi = self.pulse_class.pulse(efp[ind], pm=self.pm) efields[ind] = efi[:, -self.iprime:] else: # figure out the biggest array size we will get d_ind = self.pulse_class.cols['d'] t = self.pulse_class.get_t(efp[..., d_ind]) # now that we know t vals, we can set fixed bounds self.pulse_class.fixed_bounds_min = t.min() self.pulse_class.fixed_bounds_max = t.max() self.pulse_class.fixed_bounds = True efields_shape[-1] = t.size efields = np.zeros((efields_shape), dtype=np.complex) try: with wt.kit.Timer(): for ind in np.ndindex(tuple(efields_shape[:-2])): ti, efi = self.pulse_class.pulse(efp[ind], pm=self.pm) efields[ind] = efi finally: # set the class back to what it was before exiting self.pulse_class.fixed_bounds = False return efields
from django.db.transaction import non_atomic_requests from django.utils.translation import ( ugettext, ugettext_lazy as _, pgettext_lazy) import jingo import jinja2 from olympia import amo from olympia.amo.helpers import urlparams from olympia.amo.urlresolvers import reverse from olympia.amo.utils import render @jinja2.contextfunction def install_button(context, addon, version=None, show_contrib=True, show_warning=True, src='', collection=None, size='', detailed=False, impala=False, latest_beta=False): """ If version isn't given, we use the latest version. You can set latest_beta parameter to use latest beta version instead. """ assert not (version and latest_beta), ( 'Only one of version and latest_beta can be specified') request = context['request'] app, lang = context['APP'], context['LANG'] src = src or context.get('src') or request.GET.get('src', '') collection = ((collection.uuid if hasattr(collection, 'uuid') else None) or collection or context.get('collection') or request.GET.get('collection') or request.GET.get('collection_id') or request.GET.get('collection_uuid')) button = install_button_factory(addon, app, lang, version, show_contrib, show_warning, src, collection, size, detailed, impala, latest_beta) installed = (request.user.is_authenticated() and addon.id in request.user.mobile_addons) c = {'button': button, 'addon': addon, 'version': button.version, 'installed': installed} if impala: template = 'addons/impala/button.html' else: template = 'addons/button.html' t = jingo.render_to_string(request, template, c) return jinja2.Markup(t) @jinja2.contextfunction def big_install_button(context, addon, **kwargs): from olympia.addons.helpers import statusflags flags = jinja2.escape(statusflags(context, addon)) button = install_button(context, addon, detailed=True, size='prominent', **kwargs) markup = u'<div class="install-wrapper %s">%s</div>' % (flags, button) return jinja2.Markup(markup) def install_button_factory(*args, **kwargs): button = InstallButton(*args, **kwargs) # Order matters. We want to highlight unreviewed before featured. They # should be mutually exclusive, but you never know. 
classes = (('is_persona', PersonaInstallButton), ('unreviewed', UnreviewedInstallButton), ('experimental', ExperimentalInstallButton), ('featured', FeaturedInstallButton)) for pred, cls in classes: if getattr(button, pred, False): button.__class__ = cls break button.prepare() return button class InstallButton(object): button_class = ['download'] install_class = [] install_text = '' def __init__(self, addon, app, lang, version=None, show_contrib=True, show_warning=True, src='', collection=None, size='', detailed=False, impala=False, latest_beta=False): self.addon, self.app, self.lang = addon, app, lang self.latest = version is None self.version = version if not self.version: self.version = (addon.current_beta_version if latest_beta else addon.current_version) self.src = src self.collection = collection self.size = size self.detailed = detailed self.impala = impala self.is_beta = self.version and self.version.is_beta version_unreviewed = self.version and self.version.is_unreviewed self.experimental = addon.is_experimental self.unreviewed = (addon.is_unreviewed() or version_unreviewed or self.is_beta) self.featured = (not self.unreviewed and not self.experimental and not self.is_beta and addon.is_featured(app, lang)) self.is_persona = addon.type == amo.ADDON_PERSONA self._show_contrib = show_contrib self.show_contrib = (show_contrib and addon.takes_contributions and addon.annoying == amo.CONTRIB_ROADBLOCK) self.show_warning = show_warning and self.unreviewed def prepare(self): """Called after the class is set to manage contributions.""" # Get a copy for this instance. self.button_class = list(self.__class__.button_class) self.install_class = list(self.__class__.install_class) if self.show_contrib: try: self.button_class.remove('download') except ValueError: pass self.button_class += ['contrib', 'go'] self.install_class.append('contrib') if self.size: self.button_class.append(self.size) if self.is_beta: self.install_class.append('beta') def attrs(self): rv = {} addon = self.addon if (self._show_contrib and addon.takes_contributions and addon.annoying == amo.CONTRIB_AFTER): rv['data-after'] = 'contrib' if addon.type == amo.ADDON_SEARCH: rv['data-search'] = 'true' return rv def links(self): if not self.version: return [] rv = [] files = [f for f in self.version.all_files if f.status in amo.VALID_FILE_STATUSES] for file in files: text, url, os = self.file_details(file) rv.append(Link(text, self.fix_link(url), os, file)) return rv def file_details(self, file): platform = file.platform if self.latest and not self.is_beta and ( self.addon.status == file.status == amo.STATUS_PUBLIC): url = file.latest_xpi_url() elif self.latest and self.is_beta and self.addon.show_beta: url = file.latest_xpi_url(beta=True) else: url = file.get_url_path(self.src) if platform == amo.PLATFORM_ALL.id: text, os = ugettext('Download Now'), None else: text, os = ugettext('Download'), amo.PLATFORMS[platform] if self.show_contrib: # L10n: please keep &nbsp; in the string so &rarr; does not wrap. 
text = jinja2.Markup(ugettext('Continue to Download&nbsp;&rarr;')) roadblock = reverse('addons.roadblock', args=[self.addon.id]) url = urlparams(roadblock, version=self.version.version) return text, url, os def fix_link(self, url): if self.src: url = urlparams(url, src=self.src) if self.collection: url = urlparams(url, collection_id=self.collection) return url class FeaturedInstallButton(InstallButton): install_class = ['featuredaddon'] install_text = _(u'Featured') class UnreviewedInstallButton(InstallButton): install_class = ['unreviewed'] install_text = pgettext_lazy('install_button', u'Not Reviewed') button_class = 'download caution'.split() class ExperimentalInstallButton(InstallButton): install_class = ['lite'] button_class = ['caution'] install_text = pgettext_lazy('install_button', u'Experimental') class PersonaInstallButton(InstallButton): install_class = ['persona'] def links(self): return [Link(ugettext(u'Add to {0}').format(unicode(self.app.pretty)), reverse('addons.detail', args=[amo.PERSONAS_ADDON_ID]))] def attrs(self): rv = super(PersonaInstallButton, self).attrs() rv['data-browsertheme'] = self.addon.persona.json_data return rv class Link(object): def __init__(self, text, url, os=None, file=None): self.text, self.url, self.os, self.file = text, url, os, file @non_atomic_requests def js(request): return render(request, 'addons/popups.html', content_type='text/javascript')
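# A self-contained sketch of the predicate-driven class swap performed by
# install_button_factory above: reassigning __class__ after construction
# selects the first subclass whose flag is set, so prepare() then runs with
# that subclass's class attributes.  The Widget classes and widget_factory
# helper are hypothetical stand-ins, not part of this module's API.
if __name__ == '__main__':
    class Widget(object):
        css = ['download']
        unreviewed = False
        featured = False

        def prepare(self):
            self.css = list(self.__class__.css)

    class UnreviewedWidget(Widget):
        css = ['download', 'caution']

    class FeaturedWidget(Widget):
        css = ['download', 'featured']

    def widget_factory(**flags):
        widget = Widget()
        for name, value in flags.items():
            setattr(widget, name, value)
        # Order matters: unreviewed wins over featured, mirroring the factory above.
        for pred, cls in (('unreviewed', UnreviewedWidget), ('featured', FeaturedWidget)):
            if getattr(widget, pred, False):
                widget.__class__ = cls
                break
        widget.prepare()
        return widget

    assert widget_factory(featured=True).css == ['download', 'featured']
    assert widget_factory(unreviewed=True, featured=True).css == ['download', 'caution']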
#!/usr/bin/python2 ''' MIT License Copyright (c) 2017 LIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import os, argparse, fnmatch from struct import pack, unpack, calcsize DEFAULT_HASH_KEY = 0x65 class Sarc(object): """SHArchive class A class for handling SHArchive. Attributes: header: Archive file header fatheader: FAT block header entries: File entries fnt_data: Binary File Name Table (FNT) data archive_data: Archive file data """ def __init__(self, path='', order='', hash_key=DEFAULT_HASH_KEY, exclude=[]): """Initialize Sarc class. Args: path: Path to an archive file when initializing with an archive for extraction or adding files, or path to a directory when initializing with a directory for creation. order: Required only if you are creating an archive. Must be '>' or '<'. hash_key: Required only if you are creating an archive. Default 0x65 (101). Returns: None """ self.exclude = exclude if os.path.isfile(path): (self.header, self.fatheader, self.entries, self.fnt_data, self.archive_data) = self._read_archive(path) elif os.path.isdir(path): self._base_path = path self._create_archive(order, hash_key) def _create_archive(self, order, hash_key): self.header = Sarc.ArchiveBlockHeader(order=order) self.fatheader = Sarc.FATBlockHeader(order=order, hash_key=hash_key) self.entries = None file_list = walk(self._base_path) for f in file_list: self._add_file_entry(f) self.fnt_data = '' self.archive_data = '' def _read_archive(self, path): cur_pos = 0 data = open(path,'rb').read() header = Sarc.ArchiveBlockHeader(data[cur_pos:cur_pos + Sarc.ArchiveBlockHeader.C_STRUCTURE_SIZE]) cur_pos += header.header_size fatheader = Sarc.FATBlockHeader(data=data[cur_pos:cur_pos + Sarc.FATBlockHeader.C_STRUCTURE_SIZE], order=header.order) cur_pos += fatheader.header_size fatentries = [] for i in range(fatheader.file_count): fatentries.append(Sarc.FATEntry(data=data[cur_pos:cur_pos + Sarc.FATEntry.C_STRUCTURE_SIZE], order=header.order)) cur_pos += Sarc.FATEntry.C_STRUCTURE_SIZE entries = {e.hash:e for e in fatentries} fntheader = Sarc.FNTBlockHeader(data=data[cur_pos:cur_pos+Sarc.FNTBlockHeader.C_STRUCTURE_SIZE], order=header.order) cur_pos += fntheader.header_size fnt_data = data[cur_pos:header.data_block_offset] archive_data = data[header.data_block_offset:] return header, fatheader, entries, fnt_data, archive_data def add_file_entry(self, path): """Add a file entry from file system to the 'entries' attribute. Args: path: Path to the file. 
Returns: None """ for pat in self.exclude: if fnmatch.fnmatch(path, pat): return False entry = Sarc.FATEntry(order=self.header.order, base_path=self._base_path, file_path=path, hash_key=self.fatheader.hash_key) if self.entries: self.entries[entry.hash] = entry else: self.entries = {entry.hash:entry} return True _add_file_entry = add_file_entry def archive(self, archive_path, verbose=False): """Archive the Sarc class instance to a binary file. Args: archive_path: Path to output. verbose: Print verbose information. Returns: None """ fnt_list = [] data_list = [] packed_fat_entries = [] cur_fnt_offset = len(self.fnt_data) cur_data_offset = len(self.archive_data) sorted_entries = [self.entries[k] for k in sorted(self.entries.keys())] for e in sorted_entries: cur_fnt_offset, cur_data_offset = e.archive( fnt_list, data_list, cur_fnt_offset, cur_data_offset) packed_fat_entries.append(e.pack()) self.fatheader.file_count += 1 if verbose: print 'Archived:', e.r_path if self.fatheader.file_count > Sarc.FATBlockHeader._C_ARCHIVE_ENTRY_MAX: print 'WARNING: File entries exceed.' archived_data = ''.join([self.header.pack(), self.fatheader.pack()]) archived_data += ''.join(packed_fat_entries) archived_data += Sarc.FNTBlockHeader(order=self.header.order).pack() archived_data += self.fnt_data + ''.join(fnt_list) archived_data += (align(len(archived_data), 0x100) - len(archived_data)) * '\x00' #Dumn self.header.data_block_offset = len(archived_data) archived_data += self.archive_data + ''.join(data_list) self.header.file_size = len(archived_data) archive_file = open(archive_path, 'wb') archive_file.write(archived_data) archive_file.seek(0, 0) archive_file.write(self.header.pack()) archive_file.close() def extract(self, path, all=False, name=None, hash=0, save_file=True, verbose=False): """Extract archived files. Args: path: Path to output. all: Extract all files. name: File name to extract. hash: Hash of the file to extract. If 'name' argument is set, this argument will be ignored. save_file: Save the file to file system. False for listing file(s). verbose: Print verbose infomation. Returns: None Raises: KeyError: When input file name or hash doesn't exist. """ if all: for k in sorted(self.entries): self.extract(path, all=False, name=None, hash=k, save_file=save_file, verbose=verbose) else: if name: hash = calchash(name, self.header.hash_key) if hash: r_path, full_path = self.entries[hash].extract(self.fnt_data, self.archive_data, path, save_file) if save_file and full_path and verbose: print 'Saved:', full_path elif not save_file and r_path: print 'Hash: %08X Path: %s'%(hash, r_path) class BlockHeader(object): """Base class of blocks header. Attributes: signature: Signature of the class instance. header_size: Header size of the class instance. C_SIGNATURE: Constant signature value. C_STRUCTURE_SIZE: Constant structure size. """ def check_valid(self): """Check if the class instance is valid. Raises: ValueError: Error occurred when class attribute invalid. """ if self.signature != self.C_SIGNATURE: raise ValueError('Invalid signature ( except: "%s", actual: "%s" )' %(self.C_SIGNATURE, self.signature)) if self.header_size != self.C_STRUCTURE_SIZE: raise ValueError('Invalid header size ( except: %x, actual: %x )' %(self.C_STRUCTURE_SIZE, self.header_size)) class ArchiveBlockHeader(BlockHeader): """Archive block header class. Attributes: signature: Signature of the class instance. header_size: Header size of the class instance. bom: Byte-order mark. Always 0xfeff. file_size: Archive file size. 
data_block_offset: Data block offset relate to zero. version: Archive version. order: Byte order. C_SIGNATURE: Constant signature value. C_STRUCTURE_SIZE: Constant structure size. HEADER_STRUCT: Structure of the binary archive's header. """ HEADER_STRUCT = '4sHHIIHH' C_STRUCTURE_SIZE = calcsize(HEADER_STRUCT) C_SIGNATURE = 'SARC' _C_ARCHIVE_VERSION = 0x0100 def __init__(self, data=None, order=''): """Initialize ArchiveBlockHeader class. Args: data: Required only if you are initializing the Sarc class with an archive. order: Required only if you are creating an archive. Must be '>' or '<'. Returns: None """ if data: bom = data[6:8] self.order = '<' if (bom == '\xff\xfe') else '>' (self.signature, self.header_size, self.bom, self.file_size, self.data_block_offset, self.version, reserved) = unpack(self.order + self.HEADER_STRUCT, data[:self.C_STRUCTURE_SIZE]) self._check_valid() else: self.order = order self.signature = self.C_SIGNATURE self.header_size = self.C_STRUCTURE_SIZE self.bom = 0xfeff self.file_size = 0 self.data_block_offset = 0 self.version = self._C_ARCHIVE_VERSION def check_valid(self): """Check if the class instance is valid. Raises: ValueError: Error occurred when class attribute invalid. """ super(Sarc.ArchiveBlockHeader, self).check_valid() if self.bom != 0xfeff: raise ValueError('Invalid BOM value ( except: %x, actual: %x )' %(0xfeff, self.bom)) if self.version != self._C_ARCHIVE_VERSION: raise ValueError('Invalid archive version ( except: %x, actual: %x )' %(self._C_ARCHIVE_VERSION, self.version)) _check_valid = check_valid def pack(self): """Pack the class instance to a str according to 'HEADER_STRUCT'. Args: None Returns: Packed structure data. """ return pack(self.order + self.HEADER_STRUCT, self.C_SIGNATURE, self.header_size, self.bom, self.file_size, self.data_block_offset, self.version, 0) class FATBlockHeader(BlockHeader): """Archive file entry block header class. Attributes: signature: Signature of the class instance. header_size: Header size of the class instance. file_count: Number of file entries. hash_key: Hash key of file name hash. order: Byte order. C_SIGNATURE: Constant signature value. C_STRUCTURE_SIZE: Constant structure size. HEADER_STRUCT: Structure of the binary archive's FAT header. """ HEADER_STRUCT = '4sHHI' C_STRUCTURE_SIZE = calcsize(HEADER_STRUCT) C_SIGNATURE = 'SFAT' _C_ARCHIVE_ENTRY_MAX = 0x3fff def __init__(self, data=None, order='', hash_key=DEFAULT_HASH_KEY): self.order = order if data: (self.signature, self.header_size, self.file_count, self.hash_key) = unpack(order + self.HEADER_STRUCT, data[:self.C_STRUCTURE_SIZE]) self._check_valid() else: self.signature = self.C_SIGNATURE self.header_size = self.C_STRUCTURE_SIZE self.file_count = 0 self.hash_key = hash_key def check_valid(self): """Check if the class instance is valid. Raises: ValueError: Error occurred when class attribute invalid. """ super(Sarc.FATBlockHeader, self).check_valid() if self.file_count > self._C_ARCHIVE_ENTRY_MAX: raise ValueError('Invalid file count: %x'%self.file_count) _check_valid = check_valid def pack(self): """Pack the class instance to a str according to 'HEADER_STRUCT'. Args: None Returns: Packed structure data. """ return pack(self.order + self.HEADER_STRUCT, self.C_SIGNATURE, self.header_size, self.file_count, self.hash_key) class FATEntry(object): """Archive file entry class. Attributes: hash: File name hash name_offset: File name offset. Relate to file name table start. data_start_offset: File data start offset. Relate to file data block start. 
data_end_offset: File data end offset. Relate to file data block start. order: Byte order. type: Entry type. C_STRUCTURE_SIZE: Constant structure size. ENTYR_STRUCT: Structure of the binary archive entry. ARCHIVED: Archived file entry. FILESYSTEM: File system file entry. """ ENTYR_STRUCT = 'IIII' C_STRUCTURE_SIZE = calcsize(ENTYR_STRUCT) _C_FNT_ALIGNMENT = 4 ARCHIVED = 0 FILESYSTEM = 1 def __init__(self, data=None, order='', base_path='', file_path='', hash_key=DEFAULT_HASH_KEY): self.order = order if data: self.type = self.ARCHIVED (self.hash, self.name_offset, self.data_start_offset, self.data_end_offset) = unpack(order + self.ENTYR_STRUCT, data[:self.C_STRUCTURE_SIZE]) self._check_valid() else: self.type = self.FILESYSTEM self.path = file_path self.r_path = getrpath(base_path, file_path) self.hash = calchash(self.r_path, hash_key) self.name_offset = 0 self.data_start_offset = 0 self.data_end_offset = 0 def _align_data(self, data, cur_pos): if self._is_bflim(data): alignment = self._read_bflim_alignment(data) return align(cur_pos, alignment) - cur_pos else: return 0 def _align_fn(self, fn, alignment): return align(len(fn), alignment) - len(fn) def _is_bflim(self, data): return ((data[-0x28:-0x24] == 'FLIM') and (len(data) == unpack(self.order + 'I', data[-0x1C:-0x18])[0])) def _is_bflan(self, data): return data[:4] == 'FLAN' def _read_bflim_alignment(self, data): return unpack(self.order + 'H', data[-8:-6])[0] def archive(self, fnt_list, data_list, cur_fnt_offset, cur_data_offset): if self.type == self.ARCHIVED: return cur_fnt_offset, cur_data_offset elif self.type == self.FILESYSTEM: file_data = open(self.path, 'rb').read() feed = self._align_data(file_data, cur_data_offset) if feed > 0: data_list.append(feed * '\x00') cur_data_offset += feed data_list.append(file_data) self.data_start_offset = cur_data_offset self.data_end_offset = cur_data_offset + len(file_data) self.name_offset = ((cur_fnt_offset / self._C_FNT_ALIGNMENT) & 0x00ffffff) | (1 << 24) # Always (1 << 24) ? r_path = self.r_path + '\x00' r_path += self._align_fn(r_path, self._C_FNT_ALIGNMENT) * '\x00' cur_fnt_offset += len(r_path) fnt_list.append(r_path) return cur_fnt_offset, self.data_end_offset def check_valid(self): pass _check_valid = check_valid def extract(self, fnt_data, archive_data, path, save_file): if self.type == self.ARCHIVED: name_offset = self.name_offset & 0x00ffffff r_path = get_string(fnt_data[name_offset * self._C_FNT_ALIGNMENT:]) outpath = os.path.join(path, r_path) outdir, name = os.path.split(outpath) if save_file: mkdirs(outdir) data = archive_data[self.data_start_offset:self.data_end_offset] write_file(outpath, data) return r_path, outpath else: return '', '' def pack(self): """Pack the class instance to a str according to 'HEADER_STRUCT'. Args: None Returns: Packed structure data. """ return pack(self.order + self.ENTYR_STRUCT, self.hash, self.name_offset, self.data_start_offset, self.data_end_offset) class FNTBlockHeader(BlockHeader): HEADER_STRUCT = '4sHH' C_STRUCTURE_SIZE = calcsize(HEADER_STRUCT) C_SIGNATURE = 'SFNT' def __init__(self, data=None, order=''): self.order = order if data: (self.signature, self.header_size, reserved) = unpack(order + self.HEADER_STRUCT, data[:self.C_STRUCTURE_SIZE]) self._check_valid() else: self.signature = self.C_SIGNATURE self.header_size = self.C_STRUCTURE_SIZE def check_valid(self): """Check if the class instance is valid. Raises: ValueError: Error occurred when class attribute invalid. 
""" super(Sarc.FNTBlockHeader, self).check_valid() _check_valid = check_valid def pack(self): """Pack the class instance to a str according to 'HEADER_STRUCT'. Args: None Returns: Packed structure data. """ return pack(self.order + self.HEADER_STRUCT, self.signature, self.header_size, 0) def align(value, alignment): return (value + alignment -1) / alignment * alignment def calchash(data, key): """Calculate file name hash. Args: data: File name data. key: Hash key. Returns: Hash value. """ ret = 0 for c in data: ret = (ret * key + ord(c)) & 0xffffffff return ret def get_string(data): """Get string ending with '\0'. Args: data: Data containing string. Returns: String without '\0'. """ ret = '' for c in data: if '\x00' == c: break ret += c return ret def getrpath(base, full): """Get relative path.""" ret = full[len(base):] while ret[0] in ['/','\\']: ret = ret[1:] return ret.replace('\\','/') def mkdirs(path): if not os.path.exists(path): os.makedirs(path) def walk(dirname): filelist = [] for root,dirs,files in os.walk(dirname): for filename in files: fullname=os.path.join(root,filename) filelist.append(fullname) return filelist def write_file(path, data): fs = open(path, 'wb') fs.write(data) fs.close() #Helper methods def create_archive(path, archive, order, hash_key, verbose, exclude): """Create an archive from the input directory. Args: path: Path to a directory. archive: Path to the archive. order: Byte order of the archive. Must be '>' or '<'. hash_key: File name hash key. Default 0x65. verbose: Enable verbose output. Returns: Boolean """ if (not path) or (not os.path.exists(path)): print 'Directory does not exist. Create archive failed.' return False sarc = Sarc(path=path, order=order, hash_key=hash_key, exclude=exclude) sarc.archive(archive_path=archive, verbose=verbose) def extract_archive(path, archive, verbose): """Extract an archive to the specified directory. Args: path: Path to output directory. archive: Path to the archive. verbose: Enable verbose output. Returns: Boolean """ if not path: print "Output directory hasn't set. Extract archive failed." return False sarc = Sarc(path=archive) sarc.extract(path=path, all=True, verbose=verbose) def list_archive(archive): """List contents in the archive. Args: archive: Path to the archive. 
    Returns:
        None
    """
    sarc = Sarc(path=archive)
    sarc.extract(path='', all=True, save_file=False)


if '__main__' == __name__:
    endianess = {'big': '>', 'little': '<'}
    parser = argparse.ArgumentParser(description='Nintendo Ware Layout SHArchive Tool')
    parser.add_argument('-v', '--verbose', help='Enable verbose output',
                        action='store_true', default=False)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-x', '--extract', help='Extract the archive',
                       action='store_true', default=False)
    group.add_argument('-c', '--create', help='Create an archive',
                       action='store_true', default=False)
    group.add_argument('-l', '--list', help='List contents of the archive',
                       action='store_true', default=False)
    parser.add_argument('-e', '--endianess', help='Set archive endianness',
                        choices=['big', 'little'], type=str, default='little')
    # The hash key is used arithmetically by calchash(), so parse it as an
    # integer (decimal or hex, e.g. 0x65) rather than leaving it a string.
    parser.add_argument('-k', '--hashkey', help='Set hash key',
                        type=lambda v: int(v, 0), default=DEFAULT_HASH_KEY)
    parser.add_argument('-d', '--dir', help='Set working directory')
    parser.add_argument('-f', '--archive', help='Set archive file', required=True)
    # Default to an empty list so Sarc can iterate the exclude patterns even
    # when -n is not given.
    parser.add_argument('-n', '--exclude', help='Set exclude files',
                        nargs='*', type=str, default=[])
    args = parser.parse_args()
    if args.create:
        create_archive(args.dir, args.archive, endianess[args.endianess],
                       args.hashkey, args.verbose, args.exclude)
    if args.extract:
        extract_archive(args.dir, args.archive, args.verbose)
    if args.list:
        list_archive(args.archive)
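

# Usage sketch (illustrative only): besides the argparse front end above, the
# helpers can be driven programmatically. The directory and file names used
# here are hypothetical.
def _demo_roundtrip(src_dir='layout_dir', archive_path='layout.sarc'):
    # Pack a directory into a little-endian archive with the default hash key.
    create_archive(path=src_dir, archive=archive_path, order='<',
                   hash_key=DEFAULT_HASH_KEY, verbose=True, exclude=[])
    # Print hash/path pairs without writing anything to disk.
    list_archive(archive_path)
    # Unpack everything into an output directory.
    extract_archive(path='out_dir', archive=archive_path, verbose=True)
    # Files are looked up by a rolling hash over their relative path:
    #   hash = (hash * key + ord(byte)) & 0xffffffff
    print 'lyt/sample.bflyt -> %08X' % calchash('lyt/sample.bflyt',
                                                DEFAULT_HASH_KEY)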
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implements the graph generation for computation of gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_grad # pylint: disable=unused-import from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops # pylint: disable=unused-import from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_util from tensorflow.python.ops import image_grad # pylint: disable=unused-import from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import from tensorflow.python.ops import logging_ops # pylint: disable=unused-import from tensorflow.python.ops import manip_grad # pylint: disable=unused-import from tensorflow.python.ops import math_grad # pylint: disable=unused-import from tensorflow.python.ops import math_ops from tensorflow.python.ops import optional_grad # pylint: disable=unused-import from tensorflow.python.ops import random_grad # pylint: disable=unused-import from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["gradients"]) def gradients(ys, xs, grad_ys=None, name="gradients", colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None, stop_gradients=None, unconnected_gradients=UnconnectedGradients.NONE): """Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`. `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys` is a list of `Tensor`, holding the gradients received by the `ys`. The list must be the same length as `ys`. `gradients()` adds ops to the graph to output the derivatives of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. `grad_ys` is a list of tensors of the same length as `ys` that holds the initial gradients for each y in `ys`. When `grad_ys` is None, we fill in a tensor of '1's of the shape of y for each y in `ys`. A user can provide their own initial `grad_ys` to compute the derivatives using a different initial gradient for each y (e.g., if one wanted to weight the gradient differently for each value in each y). `stop_gradients` is a `Tensor` or a list of tensors to be considered constant with respect to all `xs`. These tensors will not be backpropagated through, as though they had been explicitly disconnected using `stop_gradient`. 
Among other things, this allows computation of partial derivatives as opposed to total derivatives. For example: ```python a = tf.constant(0.) b = 2 * a g = tf.gradients(a + b, [a, b], stop_gradients=[a, b]) ``` Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the total derivatives `tf.gradients(a + b, [a, b])`, which take into account the influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is equivalent to: ```python a = tf.stop_gradient(tf.constant(0.)) b = tf.stop_gradient(2 * a) g = tf.gradients(a + b, [a, b]) ``` `stop_gradients` provides a way of stopping gradient after the graph has already been constructed, as compared to `tf.stop_gradient` which is used during graph construction. When the two approaches are combined, backpropagation stops at both `tf.stop_gradient` nodes and nodes in `stop_gradients`, whichever is encountered first. All integer tensors are considered constant with respect to all `xs`, as if they were included in `stop_gradients`. `unconnected_gradients` determines the value returned for each x in xs if it is unconnected in the graph to ys. By default this is None to safeguard against errors. Mathematically these gradients are zero which can be requested using the `'zero'` option. `tf.UnconnectedGradients` provides the following options and behaviors: ```python a = tf.ones([1, 2]) b = tf.ones([3, 1]) g1 = tf.gradients([b], [a], unconnected_gradients='none') sess.run(g1) # [None] g2 = tf.gradients([b], [a], unconnected_gradients='zero') sess.run(g2) # [array([[0., 0.]], dtype=float32)] ``` Let us take one practical example which comes during the back propogation phase. This function is used to evaluate the derivatives of the cost function with respect to Weights `Ws` and Biases `bs`. Below sample implementation provides the exaplantion of what it is actually used for : ```python Ws = tf.constant(0.) bs = 2 * Ws cost = Ws + bs # This is just an example. So, please ignore the formulas. g = tf.gradients(cost, [Ws, bs]) dCost_dW, dCost_db = g ``` Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. grad_ys: Optional. A `Tensor` or list of tensors the same size as `ys` and holding the gradients computed for each y in `ys`. name: Optional name to use for grouping all the gradient ops together. defaults to 'gradients'. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. gate_gradients: If True, add a tuple around the gradients returned for an operations. This avoids some race conditions. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate through. unconnected_gradients: Optional. Specifies the gradient value returned when the given input tensors are unconnected. Accepted values are constants defined in the class `tf.UnconnectedGradients` and the default value is `none`. Returns: A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. Raises: LookupError: if one of the operations between `x` and `y` does not have a registered gradient function. ValueError: if the arguments are invalid. RuntimeError: if called in Eager mode. """ # Creating the gradient graph for control flow mutates Operations. # _mutation_lock ensures a Session.run call cannot occur between creating and # mutating new ops. 
# pylint: disable=protected-access with ops.get_default_graph()._mutation_lock(): return gradients_util._GradientsHelper( ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients) # pylint: enable=protected-access @tf_export("gradients", v1=[]) def gradients_v2(ys, # pylint: disable=invalid-name xs, grad_ys=None, name="gradients", gate_gradients=False, aggregation_method=None, stop_gradients=None, unconnected_gradients=UnconnectedGradients.NONE): """Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`. `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys` is a list of `Tensor`, holding the gradients received by the `ys`. The list must be the same length as `ys`. `gradients()` adds ops to the graph to output the derivatives of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. `grad_ys` is a list of tensors of the same length as `ys` that holds the initial gradients for each y in `ys`. When `grad_ys` is None, we fill in a tensor of '1's of the shape of y for each y in `ys`. A user can provide their own initial `grad_ys` to compute the derivatives using a different initial gradient for each y (e.g., if one wanted to weight the gradient differently for each value in each y). `stop_gradients` is a `Tensor` or a list of tensors to be considered constant with respect to all `xs`. These tensors will not be backpropagated through, as though they had been explicitly disconnected using `stop_gradient`. Among other things, this allows computation of partial derivatives as opposed to total derivatives. For example: ```python a = tf.constant(0.) b = 2 * a g = tf.gradients(a + b, [a, b], stop_gradients=[a, b]) ``` Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the total derivatives `tf.gradients(a + b, [a, b])`, which take into account the influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is equivalent to: ```python a = tf.stop_gradient(tf.constant(0.)) b = tf.stop_gradient(2 * a) g = tf.gradients(a + b, [a, b]) ``` `stop_gradients` provides a way of stopping gradient after the graph has already been constructed, as compared to `tf.stop_gradient` which is used during graph construction. When the two approaches are combined, backpropagation stops at both `tf.stop_gradient` nodes and nodes in `stop_gradients`, whichever is encountered first. All integer tensors are considered constant with respect to all `xs`, as if they were included in `stop_gradients`. `unconnected_gradients` determines the value returned for each x in xs if it is unconnected in the graph to ys. By default this is None to safeguard against errors. Mathematically these gradients are zero which can be requested using the `'zero'` option. `tf.UnconnectedGradients` provides the following options and behaviors: ```python a = tf.ones([1, 2]) b = tf.ones([3, 1]) g1 = tf.gradients([b], [a], unconnected_gradients='none') sess.run(g1) # [None] g2 = tf.gradients([b], [a], unconnected_gradients='zero') sess.run(g2) # [array([[0., 0.]], dtype=float32)] ``` Let us take one practical example which comes during the back propogation phase. This function is used to evaluate the derivatives of the cost function with respect to Weights `Ws` and Biases `bs`. Below sample implementation provides the exaplantion of what it is actually used for : ```python Ws = tf.constant(0.) 
bs = 2 * Ws cost = Ws + bs # This is just an example. So, please ignore the formulas. g = tf.gradients(cost, [Ws, bs]) dCost_dW, dCost_db = g ``` Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. grad_ys: Optional. A `Tensor` or list of tensors the same size as `ys` and holding the gradients computed for each y in `ys`. name: Optional name to use for grouping all the gradient ops together. defaults to 'gradients'. gate_gradients: If True, add a tuple around the gradients returned for an operations. This avoids some race conditions. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate through. unconnected_gradients: Optional. Specifies the gradient value returned when the given input tensors are unconnected. Accepted values are constants defined in the class `tf.UnconnectedGradients` and the default value is `none`. Returns: A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. Raises: LookupError: if one of the operations between `x` and `y` does not have a registered gradient function. ValueError: if the arguments are invalid. RuntimeError: if called in Eager mode. """ # Creating the gradient graph for control flow mutates Operations. # _mutation_lock ensures a Session.run call cannot occur between creating and # mutating new ops. # pylint: disable=protected-access with ops.get_default_graph()._mutation_lock(): return gradients_util._GradientsHelper( ys, xs, grad_ys, name, True, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients) # pylint: enable=protected-access # TODO(vrv): Make this available when we want to make it public. def _hessian_vector_product(ys, xs, v): """Multiply the Hessian of `ys` wrt `xs` by `v`. This is an efficient construction that uses a backprop-like approach to compute the product between the Hessian and another vector. The Hessian is usually too large to be explicitly computed or even represented, but this method allows us to at least multiply by it for the same big-O cost as backprop. Implicit Hessian-vector products are the main practical, scalable way of using second derivatives with neural networks. They allow us to do things like construct Krylov subspaces and approximate conjugate gradient descent. Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y, x, v)` will return an expression that evaluates to the same values as (A + A.T) `v`. Args: ys: A scalar value, or a tensor or list of tensors to be summed to yield a scalar. xs: A list of tensors that we should construct the Hessian over. v: A list of tensors, with the same shapes as xs, that we want to multiply by the Hessian. Returns: A list of tensors (or if the list would be length 1, a single tensor) containing the product between the Hessian and `v`. Raises: ValueError: `xs` and `v` have different length. 
""" # Validate the input length = len(xs) if len(v) != length: raise ValueError("xs and v must have the same length.") # First backprop grads = gradients(ys, xs) assert len(grads) == length elemwise_products = [ math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem)) for grad_elem, v_elem in zip(grads, v) if grad_elem is not None ] # Second backprop return gradients(elemwise_products, xs) @tf_export(v1=["hessians"]) def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None): """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`. `hessians()` adds ops to the graph to output the Hessian matrix of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the Hessian of `sum(ys)`. The Hessian is a matrix of second-order partial derivatives of a scalar tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details). Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. name: Optional name to use for grouping all the gradient ops together. defaults to 'hessians'. colocate_gradients_with_ops: See `gradients()` documentation for details. gate_gradients: See `gradients()` documentation for details. aggregation_method: See `gradients()` documentation for details. Returns: A list of Hessian matrices of `sum(ys)` for each `x` in `xs`. Raises: LookupError: if one of the operations between `xs` and `ys` does not have a registered gradient function. """ xs = gradients_util._AsList(xs) # pylint: disable=protected-access kwargs = { "colocate_gradients_with_ops": colocate_gradients_with_ops, "gate_gradients": gate_gradients, "aggregation_method": aggregation_method } # Compute first-order derivatives and iterate for each x in xs. hessians = [] _gradients = gradients(ys, xs, **kwargs) for gradient, x in zip(_gradients, xs): # change shape to one-dimension without graph branching gradient = array_ops.reshape(gradient, [-1]) # Declare an iterator and tensor array loop variables for the gradients. n = array_ops.size(x) loop_vars = [ array_ops.constant(0, dtypes.int32), tensor_array_ops.TensorArray(x.dtype, n) ] # Iterate over all elements of the gradient and compute second order # derivatives. _, hessian = control_flow_ops.while_loop( lambda j, _: j < n, lambda j, result: (j + 1, result.write(j, gradients(gradient[j], x)[0])), loop_vars ) _shape = array_ops.shape(x) _reshaped_hessian = array_ops.reshape(hessian.stack(), array_ops.concat((_shape, _shape), 0)) hessians.append(_reshaped_hessian) return hessians @tf_export("hessians", v1=[]) def HessiansV2(ys, xs, gate_gradients=False, aggregation_method=None, name="hessians"): return hessians(ys, xs, name=name, gate_gradients=gate_gradients, aggregation_method=aggregation_method) HessiansV2.__doc__ = hessians.__doc__
# ext/associationproxy.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Contain the ``AssociationProxy`` class. The ``AssociationProxy`` is a Python property object which provides transparent proxied access to the endpoint of an association object. See the example ``examples/association/proxied_association.py``. """ import itertools import operator import weakref from .. import exc, orm, util from ..orm import collections, interfaces from ..sql import not_, or_ def association_proxy(target_collection, attr, **kw): """Return a Python property implementing a view of a target attribute which references an attribute on members of the target. The returned value is an instance of :class:`.AssociationProxy`. Implements a Python property representing a relationship as a collection of simpler values, or a scalar value. The proxied property will mimic the collection type of the target (list, dict or set), or, in the case of a one to one relationship, a simple scalar value. :param target_collection: Name of the attribute we'll proxy to. This attribute is typically mapped by :func:`~sqlalchemy.orm.relationship` to link to a target collection, but can also be a many-to-one or non-scalar relationship. :param attr: Attribute on the associated instance or instances we'll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, *attr*), getattr(obj2, *attr*)] If the relationship is one-to-one or otherwise uselist=False, then simply: getattr(obj, *attr*) :param creator: optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the 'value' for the new instance. For dict types, two arguments are passed: key and value. If you want to construct instances differently, supply a *creator* function that takes arguments as above and returns instances. For scalar relationships, creator() will be called if the target is None. If the target is present, set operations are proxied to setattr() on the associated object. If you have an associated object with multiple attributes, you may set up multiple association proxies mapping to different attributes. See the unit tests for examples, and for examples of how creator() functions can be used to construct the scalar relationship on-demand in this situation. :param \*\*kw: Passes along any other keyword arguments to :class:`.AssociationProxy`. """ return AssociationProxy(target_collection, attr, **kw) ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY') """Symbol indicating an :class:`InspectionAttr` that's of type :class:`.AssociationProxy`. Is assigned to the :attr:`.InspectionAttr.extension_type` attibute. """ class AssociationProxy(interfaces.InspectionAttrInfo): """A descriptor that presents a read/write view of an object attribute.""" is_attribute = False extension_type = ASSOCIATION_PROXY def __init__(self, target_collection, attr, creator=None, getset_factory=None, proxy_factory=None, proxy_bulk_set=None): """Construct a new :class:`.AssociationProxy`. The :func:`.association_proxy` function is provided as the usual entrypoint here, though :class:`.AssociationProxy` can be instantiated and/or subclassed directly. 
:param target_collection: Name of the collection we'll proxy to, usually created with :func:`.relationship`. :param attr: Attribute on the collected instances we'll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, attr), getattr(obj2, attr)] :param creator: Optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the 'value' for the new instance. For dict types, two arguments are passed: key and value. If you want to construct instances differently, supply a 'creator' function that takes arguments as above and returns instances. :param getset_factory: Optional. Proxied attribute access is automatically handled by routines that get and set values based on the `attr` argument for this proxy. If you would like to customize this behavior, you may supply a `getset_factory` callable that produces a tuple of `getter` and `setter` functions. The factory is called with two arguments, the abstract type of the underlying collection and this proxy instance. :param proxy_factory: Optional. The type of collection to emulate is determined by sniffing the target collection. If your collection type can't be determined by duck typing or you'd like to use a different collection implementation, you may supply a factory function to produce those collections. Only applicable to non-scalar relationships. :param proxy_bulk_set: Optional, use with proxy_factory. See the _set() method for details. """ self.target_collection = target_collection self.value_attr = attr self.creator = creator self.getset_factory = getset_factory self.proxy_factory = proxy_factory self.proxy_bulk_set = proxy_bulk_set self.owning_class = None self.key = '_%s_%s_%s' % ( type(self).__name__, target_collection, id(self)) self.collection_class = None @property def remote_attr(self): """The 'remote' :class:`.MapperProperty` referenced by this :class:`.AssociationProxy`. .. versionadded:: 0.7.3 See also: :attr:`.AssociationProxy.attr` :attr:`.AssociationProxy.local_attr` """ return getattr(self.target_class, self.value_attr) @property def local_attr(self): """The 'local' :class:`.MapperProperty` referenced by this :class:`.AssociationProxy`. .. versionadded:: 0.7.3 See also: :attr:`.AssociationProxy.attr` :attr:`.AssociationProxy.remote_attr` """ return getattr(self.owning_class, self.target_collection) @property def attr(self): """Return a tuple of ``(local_attr, remote_attr)``. This attribute is convenient when specifying a join using :meth:`.Query.join` across two relationships:: sess.query(Parent).join(*Parent.proxied.attr) .. versionadded:: 0.7.3 See also: :attr:`.AssociationProxy.local_attr` :attr:`.AssociationProxy.remote_attr` """ return (self.local_attr, self.remote_attr) def _get_property(self): return (orm.class_mapper(self.owning_class). get_property(self.target_collection)) @util.memoized_property def target_class(self): """The intermediary class handled by this :class:`.AssociationProxy`. Intercepted append/set/assignment events will result in the generation of new instances of this class. 
""" return self._get_property().mapper.class_ @util.memoized_property def scalar(self): """Return ``True`` if this :class:`.AssociationProxy` proxies a scalar relationship on the local side.""" scalar = not self._get_property().uselist if scalar: self._initialize_scalar_accessors() return scalar @util.memoized_property def _value_is_scalar(self): return not self._get_property().\ mapper.get_property(self.value_attr).uselist @util.memoized_property def _target_is_object(self): return getattr(self.target_class, self.value_attr).impl.uses_objects def __get__(self, obj, class_): if self.owning_class is None: self.owning_class = class_ and class_ or type(obj) if obj is None: return self if self.scalar: target = getattr(obj, self.target_collection) return self._scalar_get(target) else: try: # If the owning instance is reborn (orm session resurrect, # etc.), refresh the proxy cache. creator_id, proxy = getattr(obj, self.key) if id(obj) == creator_id: return proxy except AttributeError: pass proxy = self._new(_lazy_collection(obj, self.target_collection)) setattr(obj, self.key, (id(obj), proxy)) return proxy def __set__(self, obj, values): if self.owning_class is None: self.owning_class = type(obj) if self.scalar: creator = self.creator and self.creator or self.target_class target = getattr(obj, self.target_collection) if target is None: setattr(obj, self.target_collection, creator(values)) else: self._scalar_set(target, values) else: proxy = self.__get__(obj, None) if proxy is not values: proxy.clear() self._set(proxy, values) def __delete__(self, obj): if self.owning_class is None: self.owning_class = type(obj) delattr(obj, self.key) def _initialize_scalar_accessors(self): if self.getset_factory: get, set = self.getset_factory(None, self) else: get, set = self._default_getset(None) self._scalar_get, self._scalar_set = get, set def _default_getset(self, collection_class): attr = self.value_attr _getter = operator.attrgetter(attr) getter = lambda target: _getter(target) if target is not None else None if collection_class is dict: setter = lambda o, k, v: setattr(o, attr, v) else: setter = lambda o, v: setattr(o, attr, v) return getter, setter def _new(self, lazy_collection): creator = self.creator and self.creator or self.target_class self.collection_class = util.duck_type_collection(lazy_collection()) if self.proxy_factory: return self.proxy_factory( lazy_collection, creator, self.value_attr, self) if self.getset_factory: getter, setter = self.getset_factory(self.collection_class, self) else: getter, setter = self._default_getset(self.collection_class) if self.collection_class is list: return _AssociationList( lazy_collection, creator, getter, setter, self) elif self.collection_class is dict: return _AssociationDict( lazy_collection, creator, getter, setter, self) elif self.collection_class is set: return _AssociationSet( lazy_collection, creator, getter, setter, self) else: raise exc.ArgumentError( 'could not guess which interface to use for ' 'collection_class "%s" backing "%s"; specify a ' 'proxy_factory and proxy_bulk_set manually' % (self.collection_class.__name__, self.target_collection)) def _inflate(self, proxy): creator = self.creator and self.creator or self.target_class if self.getset_factory: getter, setter = self.getset_factory(self.collection_class, self) else: getter, setter = self._default_getset(self.collection_class) proxy.creator = creator proxy.getter = getter proxy.setter = setter def _set(self, proxy, values): if self.proxy_bulk_set: self.proxy_bulk_set(proxy, values) elif 
self.collection_class is list: proxy.extend(values) elif self.collection_class is dict: proxy.update(values) elif self.collection_class is set: proxy.update(values) else: raise exc.ArgumentError( 'no proxy_bulk_set supplied for custom ' 'collection_class implementation') @property def _comparator(self): return self._get_property().comparator def any(self, criterion=None, **kwargs): """Produce a proxied 'any' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` and/or :meth:`.RelationshipProperty.Comparator.has` operators of the underlying proxied attributes. """ if self._target_is_object: if self._value_is_scalar: value_expr = getattr( self.target_class, self.value_attr).has( criterion, **kwargs) else: value_expr = getattr( self.target_class, self.value_attr).any( criterion, **kwargs) else: value_expr = criterion # check _value_is_scalar here, otherwise # we're scalar->scalar - call .any() so that # the "can't call any() on a scalar" msg is raised. if self.scalar and not self._value_is_scalar: return self._comparator.has( value_expr ) else: return self._comparator.any( value_expr ) def has(self, criterion=None, **kwargs): """Produce a proxied 'has' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` and/or :meth:`.RelationshipProperty.Comparator.has` operators of the underlying proxied attributes. """ if self._target_is_object: return self._comparator.has( getattr(self.target_class, self.value_attr). has(criterion, **kwargs) ) else: if criterion is not None or kwargs: raise exc.ArgumentError( "Non-empty has() not allowed for " "column-targeted association proxy; use ==") return self._comparator.has() def contains(self, obj): """Produce a proxied 'contains' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` , :meth:`.RelationshipProperty.Comparator.has`, and/or :meth:`.RelationshipProperty.Comparator.contains` operators of the underlying proxied attributes. """ if self.scalar and not self._value_is_scalar: return self._comparator.has( getattr(self.target_class, self.value_attr).contains(obj) ) else: return self._comparator.any(**{self.value_attr: obj}) def __eq__(self, obj): # note the has() here will fail for collections; eq_() # is only allowed with a scalar. if obj is None: return or_( self._comparator.has(**{self.value_attr: obj}), self._comparator == None ) else: return self._comparator.has(**{self.value_attr: obj}) def __ne__(self, obj): # note the has() here will fail for collections; eq_() # is only allowed with a scalar. return self._comparator.has( getattr(self.target_class, self.value_attr) != obj) class _lazy_collection(object): def __init__(self, obj, target): self.ref = weakref.ref(obj) self.target = target def __call__(self): obj = self.ref() if obj is None: raise exc.InvalidRequestError( "stale association proxy, parent object has gone out of " "scope") return getattr(obj, self.target) def __getstate__(self): return {'obj': self.ref(), 'target': self.target} def __setstate__(self, state): self.ref = weakref.ref(state['obj']) self.target = state['target'] class _AssociationCollection(object): def __init__(self, lazy_collection, creator, getter, setter, parent): """Constructs an _AssociationCollection. This will always be a subclass of either _AssociationList, _AssociationSet, or _AssociationDict. 
lazy_collection A callable returning a list-based collection of entities (usually an object attribute managed by a SQLAlchemy relationship()) creator A function that creates new target entities. Given one parameter: value. This assertion is assumed:: obj = creator(somevalue) assert getter(obj) == somevalue getter A function. Given an associated object, return the 'value'. setter A function. Given an associated object and a value, store that value on the object. """ self.lazy_collection = lazy_collection self.creator = creator self.getter = getter self.setter = setter self.parent = parent col = property(lambda self: self.lazy_collection()) def __len__(self): return len(self.col) def __bool__(self): return bool(self.col) __nonzero__ = __bool__ def __getstate__(self): return {'parent': self.parent, 'lazy_collection': self.lazy_collection} def __setstate__(self, state): self.parent = state['parent'] self.lazy_collection = state['lazy_collection'] self.parent._inflate(self) class _AssociationList(_AssociationCollection): """Generic, converting, list-to-list proxy.""" def _create(self, value): return self.creator(value) def _get(self, object): return self.getter(object) def _set(self, object, value): return self.setter(object, value) def __getitem__(self, index): if not isinstance(index, slice): return self._get(self.col[index]) else: return [self._get(member) for member in self.col[index]] def __setitem__(self, index, value): if not isinstance(index, slice): self._set(self.col[index], value) else: if index.stop is None: stop = len(self) elif index.stop < 0: stop = len(self) + index.stop else: stop = index.stop step = index.step or 1 start = index.start or 0 rng = list(range(index.start or 0, stop, step)) if step == 1: for i in rng: del self[start] i = start for item in value: self.insert(i, item) i += 1 else: if len(value) != len(rng): raise ValueError( "attempt to assign sequence of size %s to " "extended slice of size %s" % (len(value), len(rng))) for i, item in zip(rng, value): self._set(self.col[i], item) def __delitem__(self, index): del self.col[index] def __contains__(self, value): for member in self.col: # testlib.pragma exempt:__eq__ if self._get(member) == value: return True return False def __getslice__(self, start, end): return [self._get(member) for member in self.col[start:end]] def __setslice__(self, start, end, values): members = [self._create(v) for v in values] self.col[start:end] = members def __delslice__(self, start, end): del self.col[start:end] def __iter__(self): """Iterate over proxied values. For the actual domain objects, iterate over .col instead or just use the underlying collection directly from its property on the parent. 
""" for member in self.col: yield self._get(member) raise StopIteration def append(self, value): item = self._create(value) self.col.append(item) def count(self, value): return sum([1 for _ in util.itertools_filter(lambda v: v == value, iter(self))]) def extend(self, values): for v in values: self.append(v) def insert(self, index, value): self.col[index:index] = [self._create(value)] def pop(self, index=-1): return self.getter(self.col.pop(index)) def remove(self, value): for i, val in enumerate(self): if val == value: del self.col[i] return raise ValueError("value not in list") def reverse(self): """Not supported, use reversed(mylist)""" raise NotImplementedError def sort(self): """Not supported, use sorted(mylist)""" raise NotImplementedError def clear(self): del self.col[0:len(self.col)] def __eq__(self, other): return list(self) == other def __ne__(self, other): return list(self) != other def __lt__(self, other): return list(self) < other def __le__(self, other): return list(self) <= other def __gt__(self, other): return list(self) > other def __ge__(self, other): return list(self) >= other def __cmp__(self, other): return cmp(list(self), other) def __add__(self, iterable): try: other = list(iterable) except TypeError: return NotImplemented return list(self) + other def __radd__(self, iterable): try: other = list(iterable) except TypeError: return NotImplemented return other + list(self) def __mul__(self, n): if not isinstance(n, int): return NotImplemented return list(self) * n __rmul__ = __mul__ def __iadd__(self, iterable): self.extend(iterable) return self def __imul__(self, n): # unlike a regular list *=, proxied __imul__ will generate unique # backing objects for each copy. *= on proxied lists is a bit of # a stretch anyhow, and this interpretation of the __imul__ contract # is more plausibly useful than copying the backing objects. 
if not isinstance(n, int): return NotImplemented if n == 0: self.clear() elif n > 1: self.extend(list(self) * (n - 1)) return self def copy(self): return list(self) def __repr__(self): return repr(list(self)) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in list(locals().items()): if (util.callable(func) and func.__name__ == func_name and not func.__doc__ and hasattr(list, func_name)): func.__doc__ = getattr(list, func_name).__doc__ del func_name, func _NotProvided = util.symbol('_NotProvided') class _AssociationDict(_AssociationCollection): """Generic, converting, dict-to-dict proxy.""" def _create(self, key, value): return self.creator(key, value) def _get(self, object): return self.getter(object) def _set(self, object, key, value): return self.setter(object, key, value) def __getitem__(self, key): return self._get(self.col[key]) def __setitem__(self, key, value): if key in self.col: self._set(self.col[key], key, value) else: self.col[key] = self._create(key, value) def __delitem__(self, key): del self.col[key] def __contains__(self, key): # testlib.pragma exempt:__hash__ return key in self.col def has_key(self, key): # testlib.pragma exempt:__hash__ return key in self.col def __iter__(self): return iter(self.col.keys()) def clear(self): self.col.clear() def __eq__(self, other): return dict(self) == other def __ne__(self, other): return dict(self) != other def __lt__(self, other): return dict(self) < other def __le__(self, other): return dict(self) <= other def __gt__(self, other): return dict(self) > other def __ge__(self, other): return dict(self) >= other def __cmp__(self, other): return cmp(dict(self), other) def __repr__(self): return repr(dict(self.items())) def get(self, key, default=None): try: return self[key] except KeyError: return default def setdefault(self, key, default=None): if key not in self.col: self.col[key] = self._create(key, default) return default else: return self[key] def keys(self): return self.col.keys() if util.py2k: def iteritems(self): return ((key, self._get(self.col[key])) for key in self.col) def itervalues(self): return (self._get(self.col[key]) for key in self.col) def iterkeys(self): return self.col.iterkeys() def values(self): return [self._get(member) for member in self.col.values()] def items(self): return [(k, self._get(self.col[k])) for k in self] else: def items(self): return ((key, self._get(self.col[key])) for key in self.col) def values(self): return (self._get(self.col[key]) for key in self.col) def pop(self, key, default=_NotProvided): if default is _NotProvided: member = self.col.pop(key) else: member = self.col.pop(key, default) return self._get(member) def popitem(self): item = self.col.popitem() return (item[0], self._get(item[1])) def update(self, *a, **kw): if len(a) > 1: raise TypeError('update expected at most 1 arguments, got %i' % len(a)) elif len(a) == 1: seq_or_map = a[0] # discern dict from sequence - took the advice from # http://www.voidspace.org.uk/python/articles/duck_typing.shtml # still not perfect :( if hasattr(seq_or_map, 'keys'): for item in seq_or_map: self[item] = seq_or_map[item] else: try: for k, v in seq_or_map: self[k] = v except ValueError: raise ValueError( "dictionary update sequence " "requires 2-element tuples") for key, value in kw: self[key] = value def copy(self): return dict(self.items()) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in list(locals().items()): if (util.callable(func) 
and func.__name__ == func_name and not func.__doc__ and hasattr(dict, func_name)): func.__doc__ = getattr(dict, func_name).__doc__ del func_name, func class _AssociationSet(_AssociationCollection): """Generic, converting, set-to-set proxy.""" def _create(self, value): return self.creator(value) def _get(self, object): return self.getter(object) def _set(self, object, value): return self.setter(object, value) def __len__(self): return len(self.col) def __bool__(self): if self.col: return True else: return False __nonzero__ = __bool__ def __contains__(self, value): for member in self.col: # testlib.pragma exempt:__eq__ if self._get(member) == value: return True return False def __iter__(self): """Iterate over proxied values. For the actual domain objects, iterate over .col instead or just use the underlying collection directly from its property on the parent. """ for member in self.col: yield self._get(member) raise StopIteration def add(self, value): if value not in self: self.col.add(self._create(value)) # for discard and remove, choosing a more expensive check strategy rather # than call self.creator() def discard(self, value): for member in self.col: if self._get(member) == value: self.col.discard(member) break def remove(self, value): for member in self.col: if self._get(member) == value: self.col.discard(member) return raise KeyError(value) def pop(self): if not self.col: raise KeyError('pop from an empty set') member = self.col.pop() return self._get(member) def update(self, other): for value in other: self.add(value) def __ior__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented for value in other: self.add(value) return self def _set(self): return set(iter(self)) def union(self, other): return set(self).union(other) __or__ = union def difference(self, other): return set(self).difference(other) __sub__ = difference def difference_update(self, other): for value in other: self.discard(value) def __isub__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented for value in other: self.discard(value) return self def intersection(self, other): return set(self).intersection(other) __and__ = intersection def intersection_update(self, other): want, have = self.intersection(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) def __iand__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented want, have = self.intersection(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) return self def symmetric_difference(self, other): return set(self).symmetric_difference(other) __xor__ = symmetric_difference def symmetric_difference_update(self, other): want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) def __ixor__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) return self def issubset(self, other): return set(self).issubset(other) def issuperset(self, other): return set(self).issuperset(other) def clear(self): self.col.clear() def copy(self): return set(self) def __eq__(self, other): return 
set(self) == other def __ne__(self, other): return set(self) != other def __lt__(self, other): return set(self) < other def __le__(self, other): return set(self) <= other def __gt__(self, other): return set(self) > other def __ge__(self, other): return set(self) >= other def __repr__(self): return repr(set(self)) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in list(locals().items()): if (util.callable(func) and func.__name__ == func_name and not func.__doc__ and hasattr(set, func_name)): func.__doc__ = getattr(set, func_name).__doc__ del func_name, func
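

# Usage sketch (illustrative, not part of this module): a typical setup where
# ``User.keywords`` proxies the ``keyword`` string of related ``Keyword`` rows,
# so callers append plain strings and the proxy creates Keyword objects.
#
#   from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
#   from sqlalchemy.ext.associationproxy import association_proxy
#   from sqlalchemy.ext.declarative import declarative_base
#   from sqlalchemy.orm import relationship, Session
#
#   Base = declarative_base()
#
#   class Keyword(Base):
#       __tablename__ = 'keyword'
#       id = Column(Integer, primary_key=True)
#       user_id = Column(Integer, ForeignKey('user.id'))
#       keyword = Column(String(64), nullable=False)
#
#       def __init__(self, keyword):
#           self.keyword = keyword
#
#   class User(Base):
#       __tablename__ = 'user'
#       id = Column(Integer, primary_key=True)
#       name = Column(String(64))
#       kw = relationship(Keyword, backref='user')
#       # A list-of-strings view over ``kw``; appended strings are turned
#       # into Keyword instances via Keyword(<value>).
#       keywords = association_proxy('kw', 'keyword')
#
#   engine = create_engine('sqlite://')
#   Base.metadata.create_all(engine)
#   session = Session(engine)
#   u = User(name='ed')
#   u.keywords.append('editing')     # creates Keyword('editing') behind the scenes
#   session.add(u)
#   session.commit()
#   assert u.keywords == ['editing']
#   # Querying through the proxy emits an EXISTS subquery:
#   session.query(User).filter(User.keywords.contains('editing')).one()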
"""Core views, including the main homepage, post-commit build hook, documentation and header rendering, and server errors. """ from django.core.mail import mail_admins from django.core.urlresolvers import reverse from django.core.cache import cache from django.conf import settings from django.contrib.auth.models import User from django.db.models import F, Max from django.http import HttpResponse, HttpResponseRedirect, \ HttpResponsePermanentRedirect, Http404, HttpResponseNotFound from django.shortcuts import render_to_response, get_object_or_404 from django.template import RequestContext from django.views.decorators.csrf import csrf_view_exempt from django.views.static import serve from django.views.generic import TemplateView from haystack.views import FacetedSearchView from haystack.query import SearchQuerySet, EmptySearchQuerySet from core.forms import FacetedSearchForm from projects.models import Project, ImportedFile, ProjectRelationship from projects.tasks import update_docs, remove_dir from builds.models import Version import json import mimetypes import os import logging log = logging.getLogger(__name__) def homepage(request): #latest_projects = Project.objects.filter(builds__isnull=False).annotate(max_date=Max('builds__date')).order_by('-max_date')[:10] latest_projects = Project.objects.order_by('-modified_date')[:10] featured = Project.objects.filter(featured=True) return render_to_response('homepage.html', {'project_list': latest_projects, 'featured_list': featured, #'updated_list': updated }, context_instance=RequestContext(request)) def random_page(request, project=None): if project: return HttpResponseRedirect(ImportedFile.objects.filter(project__slug=project).order_by('?')[0].get_absolute_url()) return HttpResponseRedirect(ImportedFile.objects.order_by('?')[0].get_absolute_url()) @csrf_view_exempt def wipe_version(request, project_slug, version_slug): version = get_object_or_404(Version, project__slug=project_slug, slug=version_slug) if request.user not in version.project.users.all(): raise Http404("You must own this project to wipe it.") del_dir = version.project.checkout_path(version.slug) if request.method == 'POST' and del_dir: remove_dir.delay(del_dir) return render_to_response('wipe_version.html', { 'del_dir': del_dir, 'deleted': True, }, context_instance=RequestContext(request)) return render_to_response('wipe_version.html', { 'del_dir': del_dir, }, context_instance=RequestContext(request)) @csrf_view_exempt def github_build(request): """ A post-commit hook for github. 
""" if request.method == 'POST': obj = json.loads(request.POST['payload']) name = obj['repository']['name'] url = obj['repository']['url'] ghetto_url = url.replace('http://', '').replace('https://', '') branch = obj['ref'].replace('refs/heads/', '') log.info("(Github Build) %s:%s" % (ghetto_url, branch)) version_pk = None version_slug = branch try: projects = Project.objects.filter(repo__contains=ghetto_url) for project in projects: version = project.version_from_branch_name(branch) if version: log.info("(Github Build) Processing %s:%s" % (project.slug, version.slug)) default = project.default_branch or project.vcs_repo().fallback_branch if branch == default: #Shortcircuit versions that are default #These will build at "latest", and thus won't be active version = project.versions.get(slug='latest') version_pk = version.pk version_slug = version.slug log.info("(Github Build) Building %s:%s" % (project.slug, version.slug)) elif version in project.versions.exclude(active=True): log.info("(Github Build) Not building %s" % version.slug) return HttpResponseNotFound('Not Building: %s' % branch) else: version_pk = version.pk version_slug = version.slug log.info("(Github Build) Building %s:%s" % (project.slug, version.slug)) else: version_slug = 'latest' branch = 'latest' log.info("(Github Build) Building %s:latest" % project.slug) #version_pk being None means it will use "latest" update_docs.delay(pk=project.pk, version_pk=version_pk, force=True) return HttpResponse('Build Started: %s' % version_slug) except Exception, e: log.error("(Github Build) Failed: %s:%s" % (name, e)) #handle new repos project = Project.objects.filter(repo__contains=ghetto_url) if not len(project): project = Project.objects.filter(name__icontains=name) if len(project): #Bail if we think this thing exists return HttpResponseNotFound('Build Failed') #create project try: email = obj['repository']['owner']['email'] desc = obj['repository']['description'] homepage = obj['repository']['homepage'] repo = obj['repository']['url'] user = User.objects.get(email=email) proj = Project.objects.create( name=name, description=desc, project_url=homepage, repo=repo, ) proj.users.add(user) log.error("Created new project %s" % (proj)) except Exception, e: log.error("Error creating new project %s: %s" % (name, e)) return HttpResponseNotFound('Build Failed') return HttpResponseNotFound('Build Failed') else: return render_to_response('post_commit.html', {}, context_instance=RequestContext(request)) @csrf_view_exempt def bitbucket_build(request): if request.method == 'POST': obj = json.loads(request.POST['payload']) rep = obj['repository'] name = rep['name'] url = "%s%s" % ("bitbucket.org", rep['absolute_url'].rstrip('/')) log.info("(Bitbucket Build) %s" % (url)) try: project = Project.objects.filter(repo__contains=url)[0] update_docs.delay(pk=project.pk, force=True) return HttpResponse('Build Started') except Exception, e: log.error("(Github Build) Failed: %s:%s" % (name, e)) return HttpResponseNotFound('Build Failed') else: return render_to_response('post_commit.html', {}, context_instance=RequestContext(request)) @csrf_view_exempt def generic_build(request, pk): project = Project.objects.get(pk=pk) context = {'built': False, 'project': project} if request.method == 'POST': context['built'] = True slug = request.POST.get('version_slug', None) if slug: version = project.versions.get(slug=slug) update_docs.delay(pk=pk, version_pk=version.pk, force=True) else: update_docs.delay(pk=pk, force=True) #return HttpResponse('Build Started') return 
render_to_response('post_commit.html', context, context_instance=RequestContext(request)) return render_to_response('post_commit.html', context, context_instance=RequestContext(request)) def legacy_serve_docs(request, username, project_slug, filename): proj = get_object_or_404(Project, slug=project_slug) default_version = proj.get_default_version() url = reverse(serve_docs, kwargs={ 'project_slug': project_slug, 'version_slug': default_version, 'lang_slug': 'en', 'filename': filename }) return HttpResponsePermanentRedirect(url) def subproject_serve_docs(request, project_slug, lang_slug, version_slug, filename=''): parent_slug = request.slug subproject_qs = ProjectRelationship.objects.filter(parent__slug=parent_slug, child__slug=project_slug) if subproject_qs.exists(): return serve_docs(request, lang_slug, version_slug, filename, project_slug) else: log.info('Subproject lookup failed: %s:%s' % (project_slug, parent_slug)) raise Http404("Subproject does not exist") def serve_docs(request, lang_slug, version_slug, filename, project_slug=None): if not project_slug: project_slug = request.slug proj = get_object_or_404(Project, slug=project_slug) if not version_slug or not lang_slug: version_slug = proj.get_default_version() url = reverse(serve_docs, kwargs={ 'project_slug': project_slug, 'version_slug': version_slug, 'lang_slug': 'en', 'filename': filename }) return HttpResponseRedirect(url) if not filename: filename = "index.html" #This is required because we're forming the filenames outselves instead of letting the web server do it. elif proj.documentation_type == 'sphinx_htmldir' and "_static" not in filename and "_images" not in filename and "html" not in filename and not "inv" in filename: filename += "index.html" else: filename = filename.rstrip('/') basepath = proj.rtd_build_path(version_slug) log.info('Serving %s for %s' % (filename, proj)) if not settings.DEBUG: fullpath = os.path.join(basepath, filename) mimetype, encoding = mimetypes.guess_type(fullpath) mimetype = mimetype or 'application/octet-stream' response = HttpResponse(mimetype=mimetype) if encoding: response["Content-Encoding"] = encoding try: response['X-Accel-Redirect'] = os.path.join('/user_builds', proj.slug, 'rtd-builds', version_slug, filename) except UnicodeEncodeError: raise Http404 return response else: return serve(request, filename, basepath) def server_error(request, template_name='500.html'): """ A simple 500 handler so we get media """ r = render_to_response(template_name, context_instance = RequestContext(request) ) r.status_code = 500 return r def server_error_404(request, template_name='404.html'): """ A simple 500 handler so we get media """ r = render_to_response(template_name, context_instance = RequestContext(request) ) r.status_code = 404 return r class SearchView(TemplateView): template_name = "search/base_facet.html" results = EmptySearchQuerySet() form_class = FacetedSearchForm form = None query = '' selected_facets = None selected_facets_list = None def get_context_data(self, request, **kwargs): context = super(SearchView, self).get_context_data(**kwargs) context['request'] = self.request context['facets'] = self.results.facet_counts() # causes solr request #1 context['form'] = self.form context['query'] = self.query context['selected_facets'] = '&'.join(self.selected_facets) if self.selected_facets else '' context['selected_facets_list'] = self.selected_facets_list context['results'] = self.results context['count'] = len(self.results) # causes solr request #2 return context def get(self, request, 
**kwargs): """ Performing the search causes three requests to be sent to Solr. 1. For the facets 2. For the count (unavoidable, as pagination will cause this anyay) 3. For the results """ self.request = request self.form = self.build_form() self.selected_facets = self.get_selected_facets() self.selected_facets_list = self.get_selected_facets_list() self.query = self.get_query() if self.form.is_valid(): self.results = self.get_results() context = self.get_context_data(request, **kwargs) # For returning results partials for javascript if request.is_ajax() or request.GET.get('ajax'): self.template_name = 'search/faceted_results.html' return self.render_to_response(context) def build_form(self): """ Instantiates the form the class should use to process the search query. """ data = self.request.GET if len(self.request.GET) else None return self.form_class(data, facets=('project',)) def get_selected_facets_list(self): return [tuple(s.split(':')) for s in self.selected_facets if s] def get_selected_facets(self): """ Returns the a list of facetname:value strings e.g. [u'project_exact:Read The Docs', u'author_exact:Eric Holscher'] """ return self.request.GET.getlist('selected_facets') def get_query(self): """ Returns the query provided by the user. Returns an empty string if the query is invalid. """ return self.request.GET.get('q') def get_results(self): """ Fetches the results via the form. """ return self.form.search()
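

# An illustrative URLconf entry for SearchView (the pattern and names below
# are examples, not taken from this project's urls.py):
#
#     from django.conf.urls.defaults import patterns, url
#     urlpatterns = patterns('',
#         url(r'^search/$', SearchView.as_view(), name='search'),
#     )
#
# Requests made via XMLHttpRequest, or with ?ajax=1, get the partial template
# 'search/faceted_results.html' instead of the full page.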
"""A Jupyter Notebook interface to Klampt. Examples: Basic usage:: from klampt import * from klampt.vis.ipython import KlamptWidget from IPython.display import display world = WorldModel() ... #set up the world... kvis = KlamptWidget(world,width=800,height=640) display(kvis) # This pops up a window in Jupyter Immedate changes can be made using the methods in KlamptWidget:: kvis.addText(name="text_id",text="hello",position=(10,10)) kvis.addSphere(x=0,y=1.5,z=0,r=0.4) Change the configuration of things in the world, and then call update() to see the changes:: robot = world.robot(0) q = robot.getConfig() q[2] += 1.0 robot.setConfig(q) kvis.update() # The previous changes are not made until this is called If you completely change the number of objects in the world, or their underlying geometries, you will need to call w.setWorld(world) again. This is relatively expensive, so try not to do it too often:: world.readElement(...) kvis.setWorld(world) """ from klampt import threejs_get_scene,threejs_get_transforms from klampt.math import vectorops,so3,se3 from klampt.model import types from klampt.model.trajectory import Trajectory,RobotTrajectory,SE3Trajectory from klampt import RobotModel,RobotModelLink import json import time import math import ipywidgets as widgets from ipywidgets import interact, interactive, fixed, interact_manual from traitlets import Unicode, Dict, List, Int, validate, observe import traitlets import threading import warnings DEFAULT_POINT_RADIUS = 0.05 DEFAULT_AXIS_LENGTH = 0.2 DEFAULT_AXIS_WIDTH = 1 VALID_ITEM_TYPES = set(['Config','Configs','Vector3','RigidTransform','Trajectory','Geometry3D','TriangleMesh','WorldModel']) class KlamptWidget(widgets.DOMWidget): """ A Python interface with the Jupyter notebook frontend. The API is similar to the vis module, but has a reduced and slightly modified set of hooks. 
Attributes: width (Int): the width of the view in pixels (public property) height (Int): the height of the view in pixels (public property) scene (Dict): the scene JSON message (private) transforms (Dict): the transforms JSON message (private) rpc (Dict): the rpc JSON message (private) _camera (Dict): the incoming camera JSON message from the frontend (private) camera (Dict): the outgoing camera JSON message (private) drawn (Int): the incoming drawn message from the frontend (private) events (List): incoming events from the frontend (private) world (WorldModel): the WorldModel isinstance _extras (dict): a dict mapping extra item names to (type,threejs_items) pairs _rpc_calls (list): a list of pending RPC calls between beginRpc() and endRpc() _aggregating_rpc (int): non-zero if between beginRpc and endRpc """ _model_name = Unicode('KlamptModel').tag(sync=True) _view_name = Unicode('KlamptView').tag(sync=True) _model_module = Unicode('klampt-jupyter-widget').tag(sync=True) _view_module = Unicode('klampt-jupyter-widget').tag(sync=True) _model_module_version = Unicode('0.1.1').tag(sync=True) _view_module_version = Unicode('0.1.1').tag(sync=True) width = Int(800).tag(sync=True) height = Int(600).tag(sync=True) scene = Dict().tag(sync=True) transforms = Dict().tag(sync=True) rpc = Dict().tag(sync=True) _camera = Dict().tag(sync=True) events = List().tag(sync=True) drawn = Int(0).tag(sync=True) def __init__(self,world=None,*args,**kwargs): widgets.DOMWidget.__init__(self,*args,**kwargs) self.world = world self._extras = dict() self._aggregating_rpc = 0 self._rpc_calls = [] if world is not None: self.setWorld(world) self.rpc = {} self.displayed = False self.beginRpc(True) return def __repr__(self): if not self.displayed: self.endRpc(True) return widgets.DOMWidget.__repr__(self) def setWorld(self,world): """Resets the world to a new WorldModel object. """ self.world = world self._extras = dict() self._aggregating_rpc = 0 self._rpc_calls = [] s = threejs_get_scene(self.world) self.scene = json.loads(s) def update(self): """Updates the view with changes to the world. Unlike setWorld(), this only pushes the geometry transforms, so it's much faster.""" if self.world: s = threejs_get_transforms(self.world) self.transforms = json.loads(s) def clear(self): """Clears everything from the visualization, including the world.""" self._extras = dict() self._aggregating_rpc = 0 self._rpc_calls = [] self._do_rpc({'type':'reset_scene'}) self.drawn = 0 self.displayed = False self.beginRpc(True) self.world = None def clearExtras(self): """Erases all ghosts, lines, points, text, etc from the visualization, but keeps the world.""" self._extras = dict() self._do_rpc({'type':'clear_extras'}) #TODO: implement this to be more similar to the vis API #def clearText(self): def add(self,name,item,type='auto',**kwargs): """Adds the item to the world, and returns a list of identifiers associated with it. Args: name (str): the name of the item, which will be used to refer to it from now on item: the item data type (str, optional): either 'auto' (default) or a string describing the type of ``item``, which can help disambiguate some types like 'Config' vs 'Vector3' (see below) kwargs: possible attributes. 
Examples include color, size, length, and width Supports items of type: * Config, as a ghost (list, same size as robot) * Configs, as a set of ghosts (list of lists, same size as robot) * Vector3, drawn as a sphere (3-list) * RigidTransform, drawn as an xform (pair of 9-list and 3-list) * Configs, drawn as a polyline (list of 3-lists) * Trajectory, drawn either as: * a polyline (3D Trajectory objects), * set of milestones (Trajectory or RobotTrajectory objects) * a polyline + set of rigid transform milestones (SE3Trajectory objects) * WorldModel, but only one world at once is supported (same as setWorld). """ if type == 'auto': try: candidates = types.object_to_types(item,self.world) except Exception: raise ValueError("Invalid item, not a known Klamp't type") if isinstance(candidates,(list,tuple)): #print("KlamptWidget.add: multiple matching types:",candidates) if 'Config' in candidates: if self.world is None: candidates.remove('Config') else: match = any(len(item) == self.world.robot(i).numLinks() for i in range(self.world.numRobots())) if not match: candidates.remove('Config') new_candidates = [v for v in candidates if v in VALID_ITEM_TYPES] if len(new_candidates)==0: raise ValueError("Invalid item, types %s not supported by IPython widget"%(str(candidates),)) type = new_candidates[0] else: type = candidates if type == 'Config': res = self.addGhost(name) self.setGhostConfig(item,name=name) if 'color' in kwargs: KlamptWidget.setColor(self,res,*kwargs['color']) return [res] elif type == 'Configs': if len(item[0]) == 3: #it's a polyline self.addPolyline(name,item) if 'color' in kwargs: KlamptWidget.setColor(self,name,*kwargs['color']) return [name] else: #it's a set of configurations names = [] for i,q in enumerate(item): iname = name+'_'+str(i) self.addGhost(iname) self.setGhostConfig(q,name=iname) names.append(iname) self._extras[name] = ('Configs',names) if 'color' in kwargs: KlamptWidget.setColor(self,name,*kwargs['color']) return names elif type == 'Vector3': self.addSphere(name,item[0],item[1],item[2],kwargs.get('size',DEFAULT_POINT_RADIUS)) if 'color' in kwargs: KlamptWidget.setColor(self,name,*kwargs['color']) return [name] elif type == 'RigidTransform': self.addXform(name,length=kwargs.get('length',DEFAULT_AXIS_LENGTH),width=kwargs.get('width',DEFAULT_AXIS_WIDTH)) self.setTransform(name,R=item[0],t=item[1]) return [name] elif type == 'Trajectory': if isinstance(item,SE3Trajectory): res = [] ttraj = [] for i in item.milestones: T = item.to_se3(item.milestones[i]) res += self.add(name+"_milestone_"+str(i),T) ttraj.append(T[1]) res += self.add(name,ttraj,**kwargs) self._extras[name] = ('Trajectory',res) return res elif isinstance(item,RobotTrajectory): #it's a set of configurations rindex = item.robot.index names = [] for i,q in enumerate(item.milestones): iname = name+'_'+str(i) self.addGhost(iname,rindex) self.setGhostConfig(q,iname,rindex) names.append(iname) self._extras[name] = ('Configs',names) if 'color' in kwargs: for name in names: KlamptWidget.setColor(self,name,*kwargs['color']) return names else: return self.add(name,item.milestones,**kwargs) elif type == 'Geometry3D': if item.type() == 'PointCloud': pc = item.getPointCloud() res = self.add(name,pc,'PointCloud',**kwargs) self.setTransform(name,*item.getCurrentTransform()) return res else: g = item.convert('TriangleMesh') tris = g.getTriangleMesh() res = self.add(name,tris,'TriangleMesh',**kwargs) self.setTransform(name,*item.getCurrentTransform()) return res elif type == 'TriangleMesh': tris = item data = ([v for v in 
tris.vertices],[i for i in tris.indices]) self._extras[name] = ('Trilist',data) self._do_rpc({'type':'add_trimesh','name':name,'verts':data[0],'tris':data[1]} ) if 'color' in kwargs: KlamptWidget.setColor(self,name,*kwargs['color']) return [name] elif type == 'PointCloud': pc = item from klampt.model import geometry colors = geometry.point_cloud_colors(pc,'rgb') data = ([v for v in pc.vertices],colors) self._extras[name] = ('Points',data) msg = {'type':'add_points','name':name,'verts':data[0],'size':kwargs.get('size',0.01)} if colors is not None: msg['colors'] = colors self._do_rpc(msg) if 'color' in kwargs: KlamptWidget.setColor(self,name,*kwargs['color']) return [name] elif type == 'WorldModel': if name != 'world' or self.world is not None: warnings.warn("KlamptWidget.add: only one world is supported, and should be added as world") self.world = item s = threejs_get_scene(self.world) self.scene = json.loads(s) else: raise ValueError("KlamptWidget can't handle objects of type "+type+" yet") def remove(self,name): """Removes a certain named target, e.g. a ghost, line, text, etc.""" self._do_rpc({'type':'remove','object':name}) def hide(self,name,hidden=True): """Hides/shows named target, e.g. a ghost, line, text, etc.""" self._do_rpc({'type':'set_visible','object':name,'value':(not hidden)}) def resetCamera(self): """Resets the camera to the original view""" self._do_rpc({'type':'reset_camera'}) def getCamera(self): """Returns a data structure representing the current camera view""" res = dict(self._camera).copy() if 'r' in res: del res['r'] return res def setCamera(self,cam): """Sets the current camera view""" msg = dict(cam).copy() msg['type'] = 'set_camera' self._do_rpc(msg) marked = dict(cam).copy() marked['r'] = 1 self._camera = marked def hide(self,name,value=False): """Changes the visibility status of a certain named target""" target_name = name if name in self._extras: type,data = self._extras[name] if type == 'Config': target_name = data elif type == 'Configs' or type == 'Trajectory': self.beginRpc(strict=False) for subitem in data: self._do_rpc({'type':'set_visible','object':subitem,'value':value}) self.endRpc(strict=False) return self._do_rpc({'type':'set_visible','object':target_name,'value':value}) def setColor(self,target,r,g,b,a=1.0): """Sets the given RobotModel, RobotModelLink, named link, indexed link, or object name to some RGBA color (each channel in the range [0,1]).""" recursive=False target_name = None if isinstance(target, (int, float, complex)): robot = self.world.robot(0) target_as_link = robot.link(target) target_name=target_as_link.getName() elif isinstance(target,RobotModelLink): target_name=target.getName() elif isinstance(target,RobotModel): target_name=target.getName() recursive = True elif isinstance(target, str): target_name=target if target in self._extras: type,data = self._extras[target] if type == 'Config': target_name = data recursive = True elif type == 'Configs' or type == 'Trajectory': #it's a group set everything under the group self.beginRpc(strict=False) for subitem in data: KlamptWidget.setColor(self,subitem,r,g,b,a) self.endRpc(strict=False) return else: #see if it's the name of a robot try: self.world.robot(target).index recursive = True except Exception: found = False for i in range(self.world.numRobots()): if self.world.robot(i).link(target).index >= 0: found = True break if not found: raise ValueError("ERROR: setColor requires target of either robot, link, index, or string name of object!") else: raise ValueError("ERROR: setColor requires 
target of either robot, link, index, or string name of object!")
        rgba_color = [r,g,b,a]
        if recursive:
            self._do_rpc({'type':'set_color','object':target_name,'rgba':rgba_color,'recursive':True})
        else:
            self._do_rpc({'type':'set_color','object':target_name,'rgba':rgba_color})
        #print "Setting link color!",('object',target_name,'rgba'),rgba_color

    def setTransform(self,name,R=so3.identity(),t=[0]*3,matrix=None):
        """Sets the transform of the target object.  If matrix is given, it's
        a 16-element array giving the 4x4 homogeneous transform matrix, in
        row-major format.  Otherwise, R and t are the 9-element klampt.so3
        rotation and 3-element translation."""
        if matrix is not None:
            self._do_rpc({'type':'set_transform','object':name,'matrix':matrix})
        else:
            self._do_rpc({'type':'set_transform','object':name,'matrix':[R[0],R[3],R[6],t[0],R[1],R[4],R[7],t[1],R[2],R[5],R[8],t[2],0,0,0,1]})

    def addGhost(self,name="ghost",robot=0):
        """Adds a ghost configuration of the robot that can be posed
        independently.  name can be set to identify multiple ghosts.

        The identifier of the ghost in the three.js scene is
        prefixname + robot.getName(), and all the links are identified by
        prefixname + link name."""
        if robot < 0 or robot >= self.world.numRobots():
            raise ValueError("Invalid robot specified")
        target_name=self.world.robot(robot).getName()
        self._do_rpc({'type':'add_ghost','object':target_name,'prefix_name':name})
        self._extras[name] = ('Config',name+target_name)
        return name

    def getRobotConfig(self,robot=0):
        """A convenience function.  Gets the robot's configuration in the
        visualization world."""
        if robot < 0 or robot >= self.world.numRobots():
            raise ValueError("Invalid robot specified")
        robot = self.world.robot(robot)
        q = robot.getConfig()
        return q

    def setGhostConfig(self,q,name="ghost",robot=0):
        """Sets the configuration of the ghost to q.  If the ghost is named,
        its name should be given in ``name``."""
        if robot < 0 or robot >= self.world.numRobots():
            raise ValueError("Invalid robot specified")
        robot = self.world.robot(robot)
        q_original = robot.getConfig()
        if len(q) != robot.numLinks():
            raise ValueError("Config must be correct size: %d != %d"%(len(q),robot.numLinks()))
        robot.setConfig(q)
        self.beginRpc(strict=False)
        rpcs = []
        for i in range(robot.numLinks()):
            T = robot.link(i).getTransform()
            p = robot.link(i).getParent()
            if p>=0:
                Tp = robot.link(p).getTransform()
                T = se3.mul(se3.inv(Tp),T)
            mat = se3.homogeneous(T)
            #mat is now a 4x4 homogeneous matrix
            linkname = name+robot.link(i).getName()
            #send to the ghost link with name "name"...
            self._do_rpc({'type':'set_transform','object':linkname,'matrix':[mat[0][0],mat[0][1],mat[0][2],mat[0][3],mat[1][0],mat[1][1],mat[1][2],mat[1][3],mat[2][0],mat[2][1],mat[2][2],mat[2][3],mat[3][0],mat[3][1],mat[3][2],mat[3][3]]})
        self.endRpc(strict=False)
        robot.setConfig(q_original)   #restore original config

    def addText(self,name="HUD_Text1",text="",position=None):
        """Adds a new piece of text displayed on the screen.  name is a
        unique identifier of the text, and position=(x,y) are the coordinates
        of the upper left corner of the text, in percent.
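
        A short illustrative call (the name and text are example values)::

            kvis.addText(name="hud1",text="iteration 0",position=(5,5))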
""" if position is None: x,y = None,None else: x,y = position self._extras[name] = ('Text',(x,y,text)) self._do_rpc({'type':'add_text','name':name,'x':x,'y':y,'text':text}) def addSphere(self,name="Sphere1",x=0,y=0,z=0,r=1): """Adds a new sphere to the world with the given x,y,z position and radius r.""" self._extras[name] = ('Sphere',(x,y,z,r)) self._do_rpc({'type':'add_sphere','name':name,'x':x,'y':y,'z':z,'r':r}) def addLine(self,name="Line1",x1=0,y1=0,z1=0,x2=1,y2=1,z2=1): """Adds a new line segment to the world connecting point (x1,y1,z1) to (x2,y2,z2)""" verts = [x1,y1,z1,x2,y2,z2] self._extras[name] = ('Line',verts) self._do_rpc({'type':'add_line','name':name,'verts':verts}) def addXform(self,name="Xform1",length=DEFAULT_AXIS_LENGTH,width=DEFAULT_AXIS_WIDTH): """Adds a new transform widget to the world with the given line length and width""" self._extras[name] = ('RigidTransform',(length,width)) self._do_rpc({'type':'add_xform','name':name,'length':length,'width':width}) def addPolyline(self,name="Line1",pts=[]): """Adds a new polygonal line segment to the world connecting the given list of 3-tuples""" verts = sum(pts,[]) self._extras[name] = ('Line',verts) self._do_rpc({'type':'add_line','name':name,'verts':verts}) def addTriangle(self,name="Tri1",a=(0,0,0),b=(1,0,0),c=(0,1,0)): """Adds a new triangle with vertices a,b,c. a,b, and c are 3-lists or 3-tuples.""" verts = a+b+c self._extras[name] = ('Trilist',verts) self._do_rpc({'type':'add_trilist','name':name,'verts':verts}) def addQuad(self,name="Quad1",a=(0,0,0),b=(1,0,0),c=(1,1,0),d=(0,1,0)): """Adds a new quad (in CCW order) with vertices a,b,c,d. a,b,c and d are 3-lists or 3-tuples.""" verts = a+b+c+a+c+d self._extras[name] = ('Trilist',verts) self._do_rpc({'type':'add_trilist','name':name,'verts':verts}) def addBillboard(self,name="Billboard",image=[[]],format='auto',crange=[0,1],colormap='auto',filter='linear',size=(1,1)): """Adds a 2D billboard to the world. The image is a 2D array of values, which is texure-mapped to a quad. By default, the billboard is centered at (0,0,0) and faces up. To modify its location or orientation, call ``setTransform`` on it. Args: name (str): the name used to refer to this item image (list of lists or str): a 2D array of single-channel values, (r,g,b) tuples, or (r,g,b,a) tuples. Rows are listed top to bottom, rows from left to right. Or, can also be a URL. format (str, optional): The image format. Can be: * 'auto': autodetect the type from the image. If the image contains values, the format is 'value'. * 'value': the values are mapped through either 'opacity', 'rainbow', or gradient color mapping. * 'rgb': if the image contains values, they are interpreted as RGB values packed in 24 bit integers. Otherwise, the first 3 channels of the tuple are used. * 'rgba': if the image contains values, they are interpreted as RGB values packed in 32 bit integers. Otherwise, they are assumed to be (r,g,b,a) tuples crange (pair of numbers, optional): the range of the given values / channels. By default [0,1], but if you are using uint8 encoding this should be set to [0,255]. colormap (optional): how the color of the billboard should be set based on the image. Valid values are: * 'auto': if the image contains values, the gradient ((0,0,0),(1,1,1)) is used. Otherwise 'replace' is used. * (color1,color2): interpolates between the two given (r,g,b) or (r,g,b,a) tuples. * 'opacity': sets the alpha channel only. 
* 'modulate': the value / rgb / rgba texture modulates the billboard color as set by setColor filter (str, optional): how values between pixels are interpolated. Either 'nearest' or 'linear'. size (pair of numbers, optional): the (width,height) pair of the billboard, in world units. """ if not isinstance(image,str): import struct import base64 bytes = [] w,h = None,None h = len(image) for row in image: if w == None: w = len(row) else: assert w == len(row),"Image is not a 2D array" pixel = image[0][0] if format == 'auto': if hasattr(pixel,'__iter__'): if len(pixel) == 4: format = 'rgba' else: format = 'rgb' else: format = 'value' else: if not hasattr(pixel,'__iter__'): format = 'p'+format gradient = (type(colormap) != str) for row in image: for pixel in row: if format == 'value': u = min(1,max(0,(pixel - crange[0]) / (crange[1]-crange[0]))) if gradient: color = vectorops.interpolate(gradient[0],gradient[1],u) r = 0xff * min(1,max(0,color[0])) g = 0xff * min(1,max(0,color[1])) b = 0xff * min(1,max(0,color[2])) packed = (0xff << 24) | (int(b) << 16) | (int(g) << 8) | int(r) bytes.append(struct.pack('<I',packed)) else: val = 0xff * u bytes.append(struct.pack('B',val)) elif format == 'prgb' or format == 'prgba': bytes.append(struct.pack('<I', pixel)) elif format == 'rgb': r = 0xff * min(1,max(0,(pixel[0] - crange[0]) / (crange[1]-crange[0]))) g = 0xff * min(1,max(0,(pixel[1] - crange[0]) / (crange[1]-crange[0]))) b = 0xff * min(1,max(0,(pixel[2] - crange[0]) / (crange[1]-crange[0]))) packed = (0xff << 24) | (int(b) << 16) | (int(g) << 8) | int(r) bytes.append(struct.pack('<I', packed)) elif format == 'rgba': r = 0xff * min(1,max(0,(pixel[0] - crange[0]) / (crange[1]-crange[0]))) g = 0xff * min(1,max(0,(pixel[1] - crange[0]) / (crange[1]-crange[0]))) b = 0xff * min(1,max(0,(pixel[2] - crange[0]) / (crange[1]-crange[0]))) a = 0xff * min(1,max(0,(pixel[3] - crange[0]) / (crange[1]-crange[0]))) packed = (int(a) << 24) | (int(b) << 16) | (int(g) << 8) | int(r) bytes.append(struct.pack('<I', packed)) else: raise ValueError("Invalid format "+format) image = base64.b64encode(''.join(bytes)) self._do_rpc({'type':'add_billboard','name':name,'imagedata':image,'width':w,'height':h,'size':size,'filter':filter,'colormap':colormap}) else: self._do_rpc({'type':'add_billboard','name':name,'image':image,'size':size,'filter':filter,'colormap':colormap}) self._extras[name] = ('Billboard',image) def beginRpc(self,strict=False): """Begins collecting a set of RPC calls to be sent at once, which is a bit faster than doing multiple addX or setX calls. Usage:: widget.beginRpc() widget.addX() ... widget.setX() widget.endRpc() #this sends all the messages at once """ if self._aggregating_rpc == 0: assert len(self._rpc_calls)==0 if self._aggregating_rpc != 0 and strict: raise RuntimeError("Each beginRpc() call must be ended with an endRpc() call") self._aggregating_rpc += 1 return def _do_rpc(self,msg): """Internally used to send or queue an RPC call""" if self._aggregating_rpc: self._rpc_calls.append(msg) else: self.rpc = msg def endRpc(self,strict=False): """Ends collecting a set of RPC calls to be sent at once, and sends the accumulated message""" if self._aggregating_rpc <= 0 or (self._aggregating_rpc!=1 and strict): raise ValueError("Each beginRpc() call must be ended with an endRpc() call") self._aggregating_rpc -= 1 if self._aggregating_rpc == 0 and len(self._rpc_calls) > 0: self.rpc = {'type':'multiple','calls':self._rpc_calls} self._rpc_calls = [] @observe('_camera') def _recv_camera(self,cam): #trigger an update? 
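        # Echo the message back with an 'r' (received) marker, mirroring
        # setCamera(); getCamera() strips this marker before returning the
        # camera dict to the caller.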
#print("Klampt widget received '_camera' message") marked = cam['new'].copy() marked['r'] = 1 self._camera = marked @observe('events') def _recv_events(self,events): elist = events['new'] if len(elist) > 0: for event in elist: self.on_event(event) self.events = [] @observe('drawn') def _recv_drawn(self,drawn): self.drawn = 0 self.displayed = True #print("Klampt widget received 'drawn' message") def on_event(self,e): print("KlamptWidget got event",e) def EditConfig(robot,klampt_widget=None,ghost=None,link_selector='slider',link_subset=None,callback=None): """Creates a Jupyter widget for interactive editing of the robot's configuration. Args: robot (RobotModel): the robot to edit klampt_widget (KlamptWidget, optional): the KlamptWidget visualization to update, or None if you don't want to visualize the editing. ghost (str, optional): if not None, this is the name of the ghost that should be updated. Widget updates are shown on the given ghost rather than the actual robot. To get the ghost configuration, you'll need to update the callback. link_selector (str): how to select links. Either: * 'slider': uses an IntSlider widget * 'dropdown': uses a Dropdown widget * 'all': shows sliders for all links link_subset (list, optional): if given, only a subset of links are shown. Otherwise, only non-fixed links are shown. callback (function, optional): a function callback(index,q) called when a DOF's value has changed. Returns: VBox: a widget to be displayed as you like """ qmin,qmax = robot.getJointLimits() qedit = robot.getConfig()[:] if link_subset == None: link_subset = [i for i in range(robot.numLinks()) if qmin[i] != qmax[i]] else: for link in link_subset: if link < 0 or link >= robot.numLinks(): raise ValueError("Invalid link specified in link_subset") link_subset = link_subset[:] def _dochange_link(link): if not math.isinf(qmin[link]): joint_slider.min = qmin[link] joint_slider.max = qmax[link] else: joint_slider.min = -2 joint_slider.max = 2 joint_slider.value = qedit[link] if klampt_widget and ghost == None: #show selected link in color #restore old colors klampt_widget.beginRpc() for i in link_subset: ilink = robot.link(i) KlamptWidget.setColor(klampt_widget,ilink,*ilink.appearance().getColor()) #change new color color = robot.link(link).appearance().getColor() r,g,b,a = color r = 1.0-(1.0-r)*0.5 g = 1.0-(1.0-g)*0.5 KlamptWidget.setColor(klampt_widget,link,r,g,b,a) klampt_widget.endRpc() def _dochange(link,value): if ghost: qold = robot.getConfig() qedit[link] = value robot.setConfig(qedit) if klampt_widget: if ghost: klampt_widget.setGhostConfig(qedit,ghost,robot.index) else: klampt_widget.update() if ghost: robot.setConfig(qold) if callback: callback(link,qedit) if link_selector == 'slider': link_slider=widgets.IntSlider(description='Link',min=0,max=len(link_subset)-1,value=0) joint_slider=widgets.FloatSlider(description='Value',min=0,max=1,value=0.5,step=0.001) _dochange_link(link_subset[0]) @interact(index=link_slider) def change_link(index): link = link_subset[index] _dochange_link(link) link_slider.observe(lambda change:change_link(change['new']),'value') def change_joint_value(value): link = link_subset[link_slider.value] _dochange(link,value) joint_slider.observe(lambda change:change_joint_value(change['new']),'value') return widgets.VBox([link_slider,joint_slider]) elif link_selector == 'dropdown': link_dropdown=widgets.Dropdown(description='Link',options=[robot.link(i).getName() for i in link_subset],value=robot.link(link_subset[0]).getName()) 
joint_slider=widgets.FloatSlider(description='Value',min=0,max=1,value=0.5,step=0.001) _dochange_link(link_subset[0]) def change_link(name): link = robot.link(name).index _dochange_link(link) link_dropdown.observe(lambda change:change_link(change['new']),'value') def change_joint_value(value): link = robot.link(link_dropdown.value).index _dochange(link,value) joint_slider.observe(lambda change:change_joint_value(change['new']),'value') return widgets.VBox([link_dropdown,joint_slider]) elif link_selector == 'all': sliders = [] for link in link_subset: sliders.append(widgets.FloatSlider(description=robot.link(link).getName(),min=qmin[link],max=qmax[link],value=qedit[link],step=0.001)) sliders[-1].observe(lambda value,link=link:_dochange(link,value['new']),'value') return widgets.VBox(sliders) else: raise ValueError("Invalid link_selector, must be slider, dropdown, or all") def EditPoint(value=None,min=None,max=None,labels=None, klampt_widget=None,point_name='edited_point',point_radius=DEFAULT_POINT_RADIUS, callback=None): """Creates a Jupyter widget for interactive editing of an xyz point Args: value (list of 3 floats, optional): the initial value of the point. If given, this must be a list and will hold the edited values. min/max (list of 3 floats, optional): the minimum and maximum of the point labels (list of strs, optional): if given, the labels of each channel klampt_widget (KlamptWidget, optional): the KlamptWidget visualization to update, or None if you don't want to visualize the point. point_name (str, optional): the name of the point in the visualization world to edit. point_radius (float, optional): the radius of the visualized point. callback (function ,optional): a function callback(xyz) called when a DOF's value has changed. Returns: VBox: a widget that can be displayed as you like """ if value is None: value = [0,0,0] else: if not isinstance(value,list): raise ValueError("value must be a 3-element list") if len(value) != 3: raise ValueError("value must be a 3-element list") if labels is None: labels = 'xyz' if min is None: min = vectorops.add(value,[-5,-5,-5]) elif isinstance(min,(int,float)): min = [min,min,min] if max is None: max = vectorops.add(value,[5,5,5]) elif isinstance(max,(int,float)): max = [max,max,max] if len(min) != 3: raise ValueError("min must be a 3-element list") if len(max) != 3: raise ValueError("max must be a 3-element list") if klampt_widget: klampt_widget.addSphere(name=point_name,x=value[0],y=value[1],z=value[2],r=point_radius) def _dochange(index,element): value[index] = element if klampt_widget: klampt_widget.addSphere(name=point_name,x=value[0],y=value[1],z=value[2],r=point_radius) if callback: callback(value) elems = [] for i in range(3): elems.append(widgets.FloatSlider(description=labels[i],value=value[i],min=min[i],max=max[i],step=0.001)) elems[-1].observe(lambda v,i=i:_dochange(i,v['new']),'value') return widgets.VBox(elems) def EditTransform(value=None,xmin=None,xmax=None,labels=None, klampt_widget=None,xform_name='edited_xform',axis_length=DEFAULT_AXIS_LENGTH,axis_width=DEFAULT_AXIS_WIDTH, callback=None): """Creates a Jupyter widget for interactive editing of a rigid transform point Args: value (klampt.se3 element), optional: the initial value of the transform (klampt.se3 element). If given as (R,t), the R and t members must be lists and will hold the edited values. 
xmin/xmax (list of 3 floats, optional): the minimum and maximum of the translation labels (list of strs, optional): if given, the labels of roll,pitch,yaw and x,y,z klampt_widget (KlamptWidget, optional): the KlamptWidget visualization to update, or None if you don't want to visualize the point. xform_name (str, optional): the name of the xform in the visualization world to edit. axis_length,axis_width (float, optional): the length and width of the visualized widget callback (function, optional): a function callback((R,t)) called when a DOF's value has changed. Returns: VBox: a widget that can be displayed as you like """ if value is None: value = se3.identity() else: if not isinstance(value,(tuple,list)): raise ValueError("value must be a 2-element sequence") if len(value) != 2: raise ValueError("value must be a 2-element sequence") if len(value[0]) != 9: raise ValueError("value[0] must be a 9-element list") if len(value[1]) != 3: raise ValueError("value[1] must be a 3-element list") if labels is None: labels = ['roll','pitch','yaw','x','y','z'] if xmin is None: xmin = vectorops.add(value[1],[-5,-5,-5]) elif isinstance(xmin,(int,float)): xmin = [xmin,xmin,xmin] if xmax is None: xmax = vectorops.add(value[1],[5,5,5]) elif isinstance(xmax,(int,float)): xmax = [xmax,xmax,xmax] if len(xmin) != 3: raise ValueError("xmin must be a 3-element list") if len(xmax) != 3: raise ValueError("xmax must be a 3-element list") if klampt_widget: klampt_widget.addXform(name=xform_name,length=axis_length,width=axis_width) klampt_widget.setTransform(name=xform_name,R=value[0],t=value[1]) rpy = list(so3.rpy(value[0])) def _do_rotation_change(index,element): rpy[index] = element value[0][:] = so3.from_rpy(rpy) if klampt_widget: klampt_widget.setTransform(name=xform_name,R=value[0],t=value[1]) if callback: callback(value) def _do_translation_change(index,element): value[1][index] = element if klampt_widget: klampt_widget.setTransform(name=xform_name,R=value[0],t=value[1]) if callback: callback(value) elems = [] for i in range(3): elems.append(widgets.FloatSlider(description=labels[i],value=rpy[i],min=0,max=math.pi*2,step=0.001)) elems[-1].observe(lambda v,i=i:_do_rotation_change(i,v['new']),'value') for i in range(3): elems.append(widgets.FloatSlider(description=labels[3+i],value=value[1][i],min=xmin[i],max=xmax[i],step=0.001)) elems[-1].observe(lambda v,i=i:_do_translation_change(i,v['new']),'value') return widgets.VBox(elems) class Playback(widgets.VBox): """A play/pause/reset widget associated with a KlamptWidget. Attributes: klampt_widget (KlamptWidget, optional): the widget that should be updated after each advance call advance (function, optional): a function to be called for each new frame. pause (function, optional): a function to be called when pause is clicked. reset (function, optional): a function to be called when reset is clicked. maxframes (int, optional): the maximum number of frames. If None, this is unlimited. framerate (int, optional): number of frames per second desired. 
If None, frames are run as quickly as possible quiet (bool): if True, suppresses output during play playbutton, stepbutton, pausebutton, resetbutton (Button): the Button widgets """ def __init__(self,klampt_widget=None,advance=None,reset=None,pause=None,maxframes=None,framerate=None,quiet=False): self.klampt_widget = klampt_widget self.advance = advance self.reset = reset self.pause = pause self.maxframes = maxframes self.framerate = framerate self.quiet = quiet self.playbutton = widgets.Button( description='Play', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Start the animation', icon='play') self.stepbutton = widgets.Button( description='Step', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Step the animation', icon='step-forward') self.pausebutton = widgets.Button( description='Pause', disabled=True, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Pause the animation', icon='pause') self.resetbutton = widgets.Button( description='Reset', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Reset the animation', icon='undo') lock = threading.Lock() playdata = {'thread':None,'stop':0} self.playdata = playdata self.lock = lock self.frame = 0 #If we don't create this now, exceptions will never be printed self.out = widgets.Output() def play_thread_func(lock,playdata): if self.framerate is None: dt = 0 else: dt = 1.0/self.framerate playdata['stop'] = 0 playdata['last_frame_time'] = time.time() def do_advance(drawn=False): if playdata['stop']: return lock.acquire() try: self._advance() except Exception as e: with self.out: print("Exception occurred during Playback.advance, stopping animation") print(e) playdata['stop'] = 1 lock.release() return if self.klampt_widget and dt==0: self.klampt_widget.beginRpc() self.klampt_widget.add("__temp",[time.time(),0,0]) self.klampt_widget.remove("__temp") self.klampt_widget.endRpc() self.frame += 1 lock.release() if self.klampt_widget and dt==0: self.klampt_widget.observe(do_advance,'drawn') #kick it off with an update do_advance() t0 = time.time() while True: if playdata['stop']: break lock.acquire() if self.maxframes is not None and self.frame >= self.maxframes: #print "Stopping play by completion" self.playbutton.disabled = False self.pausebutton.disabled = True self.frame = 0 lock.release() break lock.release() if self.klampt_widget and dt==0: time.sleep(0.05) else: do_advance() t1 = time.time() time.sleep(max(dt-(t1-t0),0)) t0 = time.time() if self.klampt_widget and dt==0: self.klampt_widget.unobserve(do_advance,'drawn') playdata['thread'] = None return def on_play(b): #print "Play clicked" self.pausebutton.disabled = False self.playbutton.disabled = True assert playdata['thread'] == None playdata['thread'] = threading.Thread(target=play_thread_func,args=(lock,playdata)) playdata['thread'].start() def on_pause(b): #print "Pause clicked" self.stop() self._pause() def on_step(b): #print "Step clicked" self.stop() self.frame += 1 self._advance() def on_reset(b): #print "Reset clicked" self.stop() self.frame = 0 self.out.clear_output() self._reset() self.playbutton.on_click(on_play) self.stepbutton.on_click(on_step) self.pausebutton.on_click(on_pause) self.resetbutton.on_click(on_reset) widgets.VBox.__init__(self,[widgets.HBox([self.playbutton,self.stepbutton,self.pausebutton,self.resetbutton]), self.out]) def stop(self): """Stops any ongoing playback""" lock = self.lock playdata = self.playdata if 
playdata['thread'] is not None: #playing lock.acquire() playdata['stop'] = 1 lock.release() playdata['thread'].join() playdata['thread'] = None playdata['stop'] = 0 self.pausebutton.disabled = True self.playbutton.disabled = False def _advance(self): if self.advance: if self.quiet: self.advance() else: with self.out: self.advance() if self.klampt_widget: self.klampt_widget.update() def _reset(self): if self.reset: with self.out: self.reset() if self.klampt_widget: self.klampt_widget.update() def _pause(self): if self.pause: with self.out: self.pause() if self.klampt_widget: self.klampt_widget.update()
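

# An illustrative Playback setup (``kvis``, ``traj``, and the timing values
# below are example names/values, not part of the API):
#
#     from IPython.display import display
#     frame = [0]
#     def advance():
#         kvis.world.robot(0).setConfig(traj.eval(frame[0]*0.02))
#         frame[0] += 1
#     playback = Playback(kvis,advance=advance,maxframes=500,framerate=50)
#     display(widgets.VBox([playback,kvis]))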
# -*- coding: utf-8 -*- ''' Create virtualenv environments ''' # Import python libs import glob import shutil import logging import os import os.path # Import salt libs import salt.utils from salt.modules import state_std import salt.exceptions log = logging.getLogger(__name__) __opts__ = { 'venv_bin': 'virtualenv' } __pillar__ = {} # Define the module's virtual name __virtualname__ = 'virtualenv' def __virtual__(): return __virtualname__ def create(path, venv_bin=None, no_site_packages=None, system_site_packages=False, distribute=False, clear=False, python=None, extra_search_dir=None, never_download=None, prompt=None, pip=False, symlinks=None, upgrade=None, user=None, runas=None, saltenv='base', **kwargs): ''' Create a virtualenv path The path to create the virtualenv venv_bin : None (default 'virtualenv') The name (and optionally path) of the virtualenv command. This can also be set globally in the minion config file as ``virtualenv.venv_bin``. no_site_packages : None Passthrough argument given to virtualenv if True. Deprecated since ``salt>=0.17.0``. Use ``system_site_packages=False`` instead. system_site_packages : False Passthrough argument given to virtualenv or pyvenv distribute : False Passthrough argument given to virtualenv pip : False Install pip after creating a virtual environment, implies distribute=True clear : False Passthrough argument given to virtualenv or pyvenv python : None (default) Passthrough argument given to virtualenv extra_search_dir : None (default) Passthrough argument given to virtualenv never_download : None (default) Passthrough argument given to virtualenv if True prompt : None (default) Passthrough argument given to virtualenv if not None symlinks : None Passthrough argument given to pyvenv if True upgrade : None Passthrough argument given to pyvenv if True user : None Set ownership for the virtualenv runas : None Set ownership for the virtualenv .. note:: The ``runas`` argument is deprecated as of Hydrogen. ``user`` should be used instead. CLI Example: .. code-block:: bash salt '*' virtualenv.create /path/to/new/virtualenv ''' if venv_bin is None: venv_bin = __opts__.get('venv_bin') or __pillar__.get('venv_bin') # raise CommandNotFoundError if venv_bin is missing salt.utils.check_or_die(venv_bin) if no_site_packages is not None: # Show a deprecation warning salt.utils.warn_until( 'Helium', '\'no_site_packages\' has been deprecated. Please start using ' '\'system_site_packages=False\' which means exactly the same ' 'as \'no_site_packages=True\'. This warning and respective ' 'workaround will be removed in Salt {version}' ) if runas is not None: # The user is using a deprecated argument, warn! salt.utils.warn_until( 'Lithium', 'The \'runas\' argument to pip.install is deprecated, and will be ' 'removed in Salt {version}. Please use \'user\' instead.' ) # "There can only be one" if runas is not None and user: raise salt.exceptions.CommandExecutionError( 'The \'runas\' and \'user\' arguments are mutually exclusive. ' 'Please use \'user\' as \'runas\' is being deprecated.' ) # Support deprecated 'runas' arg elif runas is not None and not user: user = str(runas) if no_site_packages is True and system_site_packages is True: raise salt.exceptions.CommandExecutionError( '\'no_site_packages\' and \'system_site_packages\' are mutually ' 'exclusive options. Please use only one, and prefer ' '\'system_site_packages\' since \'no_site_packages\' has been ' 'deprecated.' 
) cmd = [venv_bin] if 'pyvenv' not in venv_bin: # ----- Stop the user if pyvenv only options are used ---------------> # If any of the following values are not None, it means that the user # is actually passing a True or False value. Stop Him! if upgrade is not None: raise salt.exceptions.CommandExecutionError( 'The `upgrade`(`--upgrade`) option is not supported ' 'by {0!r}'.format(venv_bin) ) elif symlinks is not None: raise salt.exceptions.CommandExecutionError( 'The `symlinks`(`--symlinks`) option is not supported ' 'by {0!r}'.format(venv_bin) ) # <---- Stop the user if pyvenv only options are used ---------------- # Virtualenv package try: import virtualenv version = getattr(virtualenv, '__version__', virtualenv.virtualenv_version) virtualenv_version_info = tuple( [int(i) for i in version.split('rc')[0].split('.')] ) except ImportError: # Unable to import?? Let's parse the version from the console version_cmd = '{0} --version'.format(venv_bin) ret = __salt__['cmd.run_all'](version_cmd, runas=user) if ret['retcode'] > 0 or not ret['stdout'].strip(): raise salt.exceptions.CommandExecutionError( 'Unable to get the virtualenv version output using {0!r}. ' 'Returned data: {1!r}'.format(version_cmd, ret) ) virtualenv_version_info = tuple( [int(i) for i in ret['stdout'].strip().split('rc')[0].split('.')] ) if no_site_packages is True: cmd.append('--no-site-packages') if distribute: if virtualenv_version_info >= (1, 10): log.info( 'The virtualenv \'--distribute\' option has been ' 'deprecated in virtualenv(>=1.10), as such, the ' '\'distribute\' option to `virtualenv.create()` has ' 'also been deprecated and it\'s not necessary anymore.' ) else: cmd.append('--distribute') if python is not None and python.strip() != '': if not os.access(python, os.X_OK): raise salt.exceptions.CommandExecutionError( 'Requested python ({0}) does not appear ' 'executable.'.format(python) ) cmd.append('--python={0}'.format(python)) if extra_search_dir is not None: if isinstance(extra_search_dir, basestring) and \ extra_search_dir.strip() != '': extra_search_dir = [ e.strip() for e in extra_search_dir.split(',') ] for entry in extra_search_dir: cmd.append('--extra-search-dir={0}'.format(entry)) if never_download is True: if virtualenv_version_info >= (1, 10): log.info( 'The virtualenv \'--never-download\' option has been ' 'deprecated in virtualenv(>=1.10), as such, the ' '\'never_download\' option to `virtualenv.create()` has ' 'also been deprecated and it\'s not necessary anymore.' ) else: cmd.append('--never-download') if prompt is not None and prompt.strip() != '': cmd.append('--prompt={0!r}'.format(prompt)) else: # venv module from the Python >= 3.3 standard library # ----- Stop the user if virtualenv only options are being used -----> # If any of the following values are not None, it means that the user # is actually passing a True or False value. Stop Him! 
if no_site_packages is not None: raise salt.exceptions.CommandExecutionError( 'The `no_site_packages`(`--no-site-packages`) option is not ' 'supported by {0!r}'.format(venv_bin) ) elif python is not None and python.strip() != '': raise salt.exceptions.CommandExecutionError( 'The `python`(`--python`) option is not supported ' 'by {0!r}'.format(venv_bin) ) elif extra_search_dir is not None and extra_search_dir.strip() != '': raise salt.exceptions.CommandExecutionError( 'The `extra_search_dir`(`--extra-search-dir`) option is not ' 'supported by {0!r}'.format(venv_bin) ) elif never_download is not None: raise salt.exceptions.CommandExecutionError( 'The `never_download`(`--never-download`) option is not ' 'supported by {0!r}'.format(venv_bin) ) elif prompt is not None and prompt.strip() != '': raise salt.exceptions.CommandExecutionError( 'The `prompt`(`--prompt`) option is not supported ' 'by {0!r}'.format(venv_bin) ) # <---- Stop the user if virtualenv only options are being used ------ if upgrade is True: cmd.append('--upgrade') if symlinks is True: cmd.append('--symlinks') # Common options to virtualenv and pyvenv if clear is True: cmd.append('--clear') if system_site_packages is True: cmd.append('--system-site-packages') # Finally the virtualenv path cmd.append(path) # Let's create the virtualenv ret = __salt__['cmd.run_stdall'](' '.join(cmd), runas=user) state_std(kwargs, ret) if ret['retcode'] > 0: # Something went wrong. Let's bail out now! return ret # Check if distribute and pip are already installed if salt.utils.is_windows(): venv_python = os.path.join(path, 'Scripts', 'python.exe') venv_pip = os.path.join(path, 'Scripts', 'pip.exe') venv_setuptools = os.path.join(path, 'Scripts', 'easy_install.exe') else: venv_python = os.path.join(path, 'bin', 'python') venv_pip = os.path.join(path, 'bin', 'pip') venv_setuptools = os.path.join(path, 'bin', 'easy_install') # Install setuptools if (pip or distribute) and not os.path.exists(venv_setuptools): _install_script( 'https://bitbucket.org/pypa/setuptools/raw/default/ez_setup.py', path, venv_python, user, saltenv=saltenv ) # clear up the distribute archive which gets downloaded for fpath in glob.glob(os.path.join(path, 'distribute-*.tar.gz*')): os.unlink(fpath) if ret['retcode'] > 0: # Something went wrong. Let's bail out now! return ret # Install pip if pip and not os.path.exists(venv_pip): _ret = _install_script( 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py', path, venv_python, user, saltenv=saltenv ) state_std(kwargs, _ret) # Let's update the return dictionary with the details from the pip # installation ret.update( retcode=_ret['retcode'], stdout='{0}\n{1}'.format(ret['stdout'], _ret['stdout']).strip(), stderr='{0}\n{1}'.format(ret['stderr'], _ret['stderr']).strip(), ) return ret def get_site_packages(venv): ''' Returns the path to the site-packages directory inside a virtualenv CLI Example: .. 
code-block:: bash salt '*' virtualenv.get_site_packages /path/to/my/venv ''' bin_path = os.path.join(venv, 'bin/python') if not os.path.exists(bin_path): raise salt.exceptions.CommandExecutionError( "Path does not appear to be a virtualenv: '{0}'".format(bin_path)) return __salt__['cmd.exec_code'](bin_path, 'from distutils import sysconfig; print sysconfig.get_python_lib()') def _install_script(source, cwd, python, user, saltenv='base'): if not salt.utils.is_windows(): tmppath = salt.utils.mkstemp(dir=cwd) else: tmppath = __salt__['cp.cache_file'](source, saltenv) if not salt.utils.is_windows(): fn_ = __salt__['cp.cache_file'](source, saltenv) shutil.copyfile(fn_, tmppath) os.chmod(tmppath, 320) os.chown(tmppath, __salt__['file.user_to_uid'](user), -1) try: return __salt__['cmd.run_stdall']( '{0} {1}'.format(python, tmppath), runas=user, cwd=cwd, env={'VIRTUAL_ENV': cwd} ) finally: os.remove(tmppath)
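

# A further illustrative CLI invocation of the module above (the path and
# interpreter are examples; see create() for the full set of passthrough
# options):
#
#     salt '*' virtualenv.create /srv/venvs/app python=/usr/bin/python2.7 pip=True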
"""Kraken - objects.operators.canvas_operator module. Classes: CanvasOperator - Canvas operator object. """ import pprint from kraken.core.maths import MathObject, Mat44, Vec2, Vec3, Xfo from kraken.core.objects.object_3d import Object3D from kraken.core.objects.operators.operator import Operator from kraken.core.objects.attributes.attribute import Attribute from kraken.core.kraken_system import ks from kraken.log import getLogger logger = getLogger('kraken') class CanvasOperator(Operator): """Canvas Operator representation.""" def __init__(self, name, canvasPresetPath): super(CanvasOperator, self).__init__(name) self.canvasPresetPath = canvasPresetPath host = ks.getCoreClient().DFG.host self.binding = host.createBindingToPreset(self.canvasPresetPath) self.node = self.binding.getExec() self.portTypeMap = { 0: 'In', 1: 'IO', 2: 'Out' } # Initialize the inputs and outputs based on the given args. for i in xrange(self.node.getExecPortCount()): portName = self.node.getExecPortName(i) portConnectionType = self.portTypeMap[self.node.getExecPortType(i)] rtVal = self.binding.getArgValue(portName) portDataType = rtVal.getTypeName().getSimpleType() if portDataType == 'Execute': continue if portConnectionType == 'In': if portDataType.endswith('[]'): self.inputs[portName] = [] else: self.inputs[portName] = None else: if portDataType.endswith('[]'): self.outputs[portName] = [] else: self.outputs[portName] = None def getDefaultValue(self, name, RTValDataType, mode="port"): """Returns the default RTVal value for this argument Only print debug if setting default inputs. Don't care about outputs, really Args: name (str): Name of the input to get. mode (str): "inputs" or "outputs" Returns: RTVal """ rtVal = self.node.getPortDefaultValue(name, RTValDataType) logger.debug("Using default value for %s.%s.%s(%s) --> %s" % (self.canvasPresetPath, self.getName(), mode, name, rtVal)) return rtVal def getPresetPath(self): """Returns the preset path within the Canvas library for the node used by this operator. Returns: str: Path of the preset files used by this operator. """ return self.canvasPresetPath def getGraphDesc(self): """Returns the json description of the node used by this operator Returns: object: A json dict containing the description the operator. """ return self.graphDesc def getInput(self, name): """Returns the input with the specified name. Args: name (str): Name of the input to get. Returns: object: Input object. 
""" if name in self.inputs and self.inputs[name] is not None: return self.inputs[name] def rt2Py(rtVal, rtType): if "[" in rtType: return [] elif rtType == "Xfo": return Xfo(rtVal) elif rtType == "Mat44": return Mat44(rtVal) elif rtType == "Vec2": return Vec2(rtVal) elif rtType == "Vec3": return Vec3(rtVal) elif type(rtVal) in (bool, str, int, float): return rtVal else: return rtVal.getSimpleType() if name not in self.inputs: raise Exception("Input with name '" + name + "' was not found in operator: " + self.getName() + ".") rtVal = self.binding.getArgValue(name) portDataType = rtVal.getTypeName().getSimpleType() defaultValue = self.getDefaultValue(name, portDataType, mode='port') pyVal = rt2Py(defaultValue, portDataType) return pyVal def getInputType(self, name): """Returns the type of input with the specified name.""" for i in xrange(self.node.getExecPortCount()): portName = self.node.getExecPortName(i) portConnectionType = self.portTypeMap[self.node.getExecPortType(i)] rtVal = self.binding.getArgValue(portName) portDataType = rtVal.getTypeName().getSimpleType() if portConnectionType == 'In' and portName == name: return portDataType raise Exception("Could not find input port %s in canvas operator %s" % (name, self.getName())) def getOutputType(self, name): """Returns the type of output with the specified name.""" for i in xrange(self.node.getExecPortCount()): portName = self.node.getExecPortName(i) portConnectionType = self.portTypeMap[self.node.getExecPortType(i)] rtVal = self.binding.getArgValue(portName) portDataType = rtVal.getTypeName().getSimpleType() if portConnectionType == 'Out' and portName == name: return portDataType raise Exception("Could not find output port %s in canvas operator %s" % (name, self.getName())) def evaluate(self): """Invokes the Canvas node causing the output values to be computed. Returns: bool: True if successful. """ super(CanvasOperator, self).evaluate() def getRTVal(obj, asInput=True): if isinstance(obj, Object3D): if asInput: return obj.globalXfo.getRTVal().toMat44('Mat44') else: return obj.xfo.getRTVal().toMat44('Mat44') elif isinstance(obj, Xfo): return obj.getRTVal().toMat44('Mat44') elif isinstance(obj, MathObject): return obj.getRTVal() elif isinstance(obj, Attribute): return obj.getRTVal() elif type(obj) in (int, float, bool, str): return obj def validateArg(rtVal, portName, portDataType): """Validate argument types when passing built in Python types. Args: rtVal (RTVal): rtValue object. portName (str): Name of the argument being validated. portDataType (str): Type of the argument being validated. 
""" # Validate types when passing a built in Python type if type(rtVal) in (bool, str, int, float): if portDataType in ('Scalar', 'Float32', 'UInt32', 'Integer'): if type(rtVal) not in (float, int): raise TypeError(self.getName() + ".evaluate(): Invalid Arg Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + portName + " (" + portDataType + ")") elif portDataType == 'Boolean': if type(rtVal) != bool: raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + portName + " (" + portDataType + ")") elif portDataType == 'String': if type(rtVal) != str: raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + portName + " (" + portDataType + ")") debug = [] for i in xrange(self.node.getExecPortCount()): portName = self.node.getExecPortName(i) portConnectionType = self.portTypeMap[self.node.getExecPortType(i)] rtVal = self.binding.getArgValue(portName) portDataType = rtVal.getTypeName().getSimpleType() portVal = None if portDataType == '$TYPE$': return if portDataType == 'Execute': continue if portDataType in ('EvalContext', 'time', 'frame'): portVal = ks.constructRTVal(portDataType) self.binding.setArgValue(portName, portVal, False) continue if portConnectionType == 'In': if str(portDataType).endswith('[]'): if not len(self.inputs[portName]): continue rtValArray = ks.rtVal(portDataType) rtValArray.resize(len(self.inputs[portName])) for j in xrange(len(self.inputs[portName])): if self.inputs[portName][j] is None: continue rtVal = getRTVal(self.inputs[portName][j]) validateArg(rtVal, portName, portDataType[:-2]) rtValArray[j] = rtVal portVal = rtValArray self.binding.setArgValue(portName, portVal, False) else: if self.inputs[portName] is None and portName == 'exec': continue elif self.inputs[portName] is None: rtVal = self.getDefaultValue(portName, portDataType, mode="port") else: rtVal = getRTVal(self.inputs[portName]) validateArg(rtVal, portName, portDataType) self.binding.setArgValue(portName, rtVal, False) else: if str(portDataType).endswith('[]'): if not len(self.outputs[portName]): continue rtValArray = ks.rtVal(portDataType) rtValArray.resize(len(self.outputs[portName])) for j in xrange(len(self.outputs[portName])): if self.outputs[portName][j] is None: continue rtVal = getRTVal(self.outputs[portName][j], asInput=False) validateArg(rtVal, portName, portDataType[:-2]) rtValArray[j] = rtVal portVal = rtValArray self.binding.setArgValue(portName, portVal, False) else: if self.outputs[portName] is None and portName == 'exec': continue elif self.outputs[portName] is None: rtVal = self.getDefaultValue(portName, portDataType, mode="port") else: rtVal = getRTVal(self.outputs[portName], asInput=False) validateArg(rtVal, portName, portDataType) self.binding.setArgValue(portName, rtVal, False) portDebug = { portName: [ { "portDataType": portDataType, "portConnectionType": portConnectionType }, portVal ] } debug.append(portDebug) try: self.binding.execute() except Exception as e: logger.error(str(e)) logger.error(self.binding.getErrors(True)) errorMsg = "Possible problem with Canvas operator '" + \ self.getName() + "' port values:" logger.error(errorMsg) logger.error(pprint.pformat(debug, width=800)) # Now put the computed values out to the connected output objects. 
        def setRTVal(obj, rtval):
            if isinstance(obj, Object3D):
                obj.xfo.setFromMat44(Mat44(rtval))
            elif isinstance(obj, Xfo):
                obj.setFromMat44(Mat44(rtval))
            elif isinstance(obj, Mat44):
                obj.setFromMat44(rtval)
            elif isinstance(obj, Attribute):
                obj.setValue(rtval)
            else:
                if hasattr(obj, '__iter__'):
                    print "Warning: Trying to set a canvas port item with an array directly."

                print "Warning: Not setting rtval: %s\n\tfor output object: %s\n\ton port: %s\n\tof canvas object: %s\n." % \
                    (rtval, obj, portName, self.getName())

        for i in xrange(self.node.getExecPortCount()):
            portName = self.node.getExecPortName(i)
            portConnectionType = self.portTypeMap[self.node.getExecPortType(i)]
            rtVal = self.binding.getArgValue(portName)
            portDataType = rtVal.getTypeName().getSimpleType()

            if portDataType == 'Execute':
                continue

            if portConnectionType != 'In':
                if portName == 'exec':
                    # Skip the exec port on each solver.
                    continue

                outVal = self.binding.getArgValue(portName)
                if str(portDataType).endswith('[]') or hasattr(outVal.getSimpleType(), '__iter__'):
                    for j in xrange(len(outVal)):
                        setRTVal(self.outputs[portName][j], outVal[j])
                else:
                    setRTVal(self.outputs[portName], outVal)

        return True
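
# Illustrative, self-contained sketch (not part of the operator above) of the
# per-port type-validation rule that validateArg() applies in evaluate().
# Plain Python only -- no Fabric Engine dependency -- and the mapping below is
# an assumption for demonstration, not the canonical KL type table.
_PY_TYPES_BY_PORT_TYPE = {
    'Scalar': (float, int),
    'Float32': (float, int),
    'UInt32': (float, int),
    'Integer': (float, int),
    'Boolean': (bool,),
    'String': (str,),
}


def _checkPortValue(value, portName, portDataType):
    """Raise TypeError if a plain Python value is invalid for a port type."""
    allowed = _PY_TYPES_BY_PORT_TYPE.get(portDataType)
    if allowed is not None and type(value) not in allowed:
        raise TypeError("Invalid Argument Value: %s (%s), for Argument: %s (%s)" %
                        (value, type(value).__name__, portName, portDataType))

# _checkPortValue(1.5, 'blend', 'Scalar')   # passes silently
# _checkPortValue('a', 'blend', 'Scalar')   # raises TypeError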
from twisted.trial.unittest import TestCase from twisted.internet import defer, reactor from mock import MagicMock from alchimia import TWISTED_STRATEGY from sqlalchemy import MetaData, Table, Column, Integer, String, DateTime from sqlalchemy import create_engine, ForeignKey from sqlalchemy.schema import CreateTable from sqlalchemy.pool import StaticPool from crudset.error import TooMany, MissingRequiredFields from crudset.crud import Crud, Paginator, Ref, Sanitizer, Readset, Writeset from crudset.crud import SanitizationContext, SaniChain, crudFromSpec from twisted.python import log import logging class TwistedLogStream(object): def write(self, msg): log.msg(msg.rstrip()) def flush(self): pass def close(self): pass logging.basicConfig(stream=TwistedLogStream()) logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) metadata = MetaData() families = Table('family', metadata, Column('id', Integer, primary_key=True), Column('location', String), Column('surname', String), ) people = Table('people', metadata, Column('id', Integer, primary_key=True), Column('created', DateTime), Column('family_id', Integer, ForeignKey('family.id')), Column('name', String), ) pets = Table('pets', metadata, Column('id', Integer, primary_key=True), Column('name', String), Column('family_id', Integer, ForeignKey('family.id')), Column('owner_id', Integer, ForeignKey('people.id')), ) class CrudTest(TestCase): timeout = 10 @defer.inlineCallbacks def engine(self): engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, reactor=reactor, strategy=TWISTED_STRATEGY, poolclass=StaticPool) yield engine.execute(CreateTable(families)) yield engine.execute(CreateTable(people)) yield engine.execute(CreateTable(pets)) defer.returnValue(engine) def test_sanitizerChain(self): """ If you pass a list of sanitizers as the sanitizer, it will be wrapped in a SaniChain """ sani = Sanitizer(families) crud = Crud(Readset(families), [sani, sani]) self.assertTrue(isinstance(crud.sanitizer, SaniChain)) self.assertEqual(crud.sanitizer.sanitizers, [sani, sani]) def test_read_write_tablesDiffer(self): """ The Readset and Writeset tables must be the same """ self.assertRaises(Exception, Crud, Readset(pets), Sanitizer(families)) @defer.inlineCallbacks def test_create(self): """ You can create an object. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) family = yield crud.create(engine, {'surname': 'Jones'}) self.assertEqual(family['surname'], 'Jones') self.assertNotEqual(family['id'], None) self.assertEqual(family['location'], None) @defer.inlineCallbacks def test_create_fixed(self): """ You can create a Crud with fixed attributes. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families, ['surname'])) crud = crud.fix({'surname':'Hammond'}) family = yield crud.create(engine, {}) self.assertEqual(family['surname'], 'Hammond') fam2 = yield crud.create(engine, {'surname': 'Jones'}) self.assertEqual(fam2['surname'], 'Hammond') @defer.inlineCallbacks def test_create_sanitize(self): """ A policy's sanitizer should be used to sanitize fields. 
""" engine = yield self.engine() called = {} class Foo(object): sanitizer = Sanitizer(families) @sanitizer.sanitizeData def sani(self, context, data): called['context'] = context return {'surname': 'Jones'} crud = Crud(Readset(families), Foo().sanitizer) family = yield crud.create(engine, {}) self.assertEqual(family['surname'], 'Jones') self.assertEqual(called['context'].action, 'create') self.assertEqual(called['context'].query, None) @defer.inlineCallbacks def test_fix_succession(self): """ You can fix attributes one after the other. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) crud = crud.fix({'surname': 'Jones'}) crud = crud.fix({'location': 'Sunnyville'}) family = yield crud.create(engine, {}) self.assertEqual(family['surname'], 'Jones') self.assertEqual(family['location'], 'Sunnyville') @defer.inlineCallbacks def test_fetch(self): """ When you fetch, you see the readable fields, which means every field by default. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': '13'}) fams = yield crud.fetch(engine) self.assertEqual(len(fams), 1) self.assertEqual(fams[0]['surname'], '13') @defer.inlineCallbacks def test_fetch_fixed(self): """ Fixed attributes restrict the fetched objects. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones'}) crud2 = crud.fix({'surname': 'Johnson'}) fams = yield crud2.fetch(engine) self.assertEqual(len(fams), 0, "Should only find (non-existent) " "records matching the fixed values") @defer.inlineCallbacks def test_fetch_expression(self): """ You can limit even further. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) for i in xrange(10): yield crud.create(engine, {'surname': 'Family %d' % (i,)}) family4 = yield crud.fetch(engine, families.c.surname == 'Family 4') self.assertEqual(len(family4), 1) self.assertEqual(family4[0]['surname'], 'Family 4') @defer.inlineCallbacks def test_fetch_readable(self): """ You can limit the set of readable fields. """ engine = yield self.engine() crud1 = Crud(Readset(families), Sanitizer(families)) yield crud1.create(engine, {'surname': 'Johnson', 'location': 'Alabama'}) crud2 = Crud(Readset(families, ['surname'])) fams = yield crud2.fetch(engine) self.assertEqual(fams[0], {'surname': 'Johnson'}, "Should only show " "the readable fields.") @defer.inlineCallbacks def test_fetch_limit(self): """ You can limit the number of returned records. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) for i in xrange(10): yield crud.create(engine, {'surname': 'Johnson %d' % (i,)}) fams = yield crud.fetch(engine, limit=5) self.assertEqual(len(fams), 5) @defer.inlineCallbacks def test_fetch_order(self): """ You can specify an ordering """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) for i in xrange(10): yield crud.create(engine, {'surname': 'sodkevoiuans'[i]}) fams = yield crud.fetch(engine, order=families.c.surname) ordered = sorted(fams, key=lambda x:x['surname']) self.assertEqual(fams, ordered, "Should be ordered") @defer.inlineCallbacks def test_fetch_offset(self): """ You can offset the limit. 
""" engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) fams = [] for i in xrange(10): fam = yield crud.create(engine, {'surname': 'abcdefghijklmnop'[i]}) fams.append(fam) results = yield crud.fetch(engine, limit=5, offset=2, order=families.c.surname) self.assertEqual(results, fams[2:2+5]) @defer.inlineCallbacks def test_getOne(self): """ You can get just one item. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) fam = yield crud.create(engine, {'surname': 'hey'}) one = yield crud.getOne(engine) self.assertEqual(one, fam) @defer.inlineCallbacks def test_getOne_where(self): """ You can get one by a where clause """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) fam1 = yield crud.create(engine, {'surname': 'bob'}) yield crud.create(engine, {'surname': 'Jones'}) one = yield crud.getOne(engine, families.c.surname == 'bob') self.assertEqual(one, fam1) @defer.inlineCallbacks def test_getOne_moreThanOne(self): """ If getOne returns more than one, it's an exception. """ engine = yield self.engine() crud = Crud(Readset(families)) yield crud.create(engine, {'surname': 'bob'}) yield crud.create(engine, {'surname': 'Jones'}) self.assertFailure(crud.getOne(engine), TooMany) @defer.inlineCallbacks def test_getOne_None(self): """ If there is no result, return None. """ engine = yield self.engine() crud = Crud(Readset(families)) one = yield crud.getOne(engine) self.assertEqual(one, None) @defer.inlineCallbacks def test_count(self): """ You can count the records. """ engine = yield self.engine() crud = Crud(Readset(families)) for i in xrange(14): yield crud.create(engine, {'surname': str(i)}) count = yield crud.count(engine) self.assertEqual(count, 14) @defer.inlineCallbacks def test_count_where(self): """ You can count filtered records. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) for i in xrange(14): yield crud.create(engine, {'surname': str(i)}) count = yield crud.count(engine, families.c.surname == '12') self.assertEqual(count, 1) @defer.inlineCallbacks def test_count_fixed(self): """ The count is restricted by fixed attributes. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones'}) yield crud.create(engine, {'surname': 'Arnold'}) crud2 = crud.fix({'surname': 'Arnold'}) count = yield crud2.count(engine) self.assertEqual(count, 1) @defer.inlineCallbacks def test_update(self): """ You can update sets. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones'}) fams = yield crud.update(engine, {'surname': 'Jamison'}) self.assertEqual(len(fams), 1) self.assertEqual(fams[0]['surname'], 'Jamison') @defer.inlineCallbacks def test_update_fixed(self): """ Fixed attributes are part of the update. 
""" engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones', 'location': 'anvilania'}) yield crud.create(engine, {'surname': 'James', 'location': 'gotham'}) crud2 = crud.fix({'surname': 'James'}) yield crud2.update(engine, {'location': 'middle earth'}) fams = yield crud.fetch(engine, families.c.surname == u'Jones') self.assertEqual(fams[0]['location'], 'anvilania') fams = yield crud.fetch(engine, families.c.surname == u'James') self.assertEqual(fams[0]['location'], 'middle earth') @defer.inlineCallbacks def test_update_fixedNoChange(self): """ You aren't allowed to update the fixed attributes. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones', 'location': 'bar'}) crud2 = crud.fix({'surname': 'Jones'}) fams = yield crud2.update(engine, {'surname': 'Allison', 'location': 'hawaii'}) fam = fams[0] self.assertEqual(fam['surname'], 'Jones', "Should keep fixed value") @defer.inlineCallbacks def test_update_nothing(self): """ It's a no-op to update nothing. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones'}) fams = yield crud.update(engine, {}) fam = fams[0] self.assertEqual(fam['surname'], 'Jones') @defer.inlineCallbacks def test_update_allFixed(self): """ All the fixed attributes should be taken into consideration. """ engine = yield self.engine() crud = Crud(Readset(pets), Sanitizer(pets)) yield crud.create(engine, {'name': 'Jones', 'family_id': 1}) yield crud.create(engine, {'name': 'James', 'family_id': 20}) yield crud.create(engine, {'name': 'Jones', 'family_id': 20}) yield crud.create(engine, {'name': 'James', 'family_id': 1}) crud2 = crud.fix({'name': 'James', 'family_id': 20}) yield crud2.update(engine, {'owner_id': -1}) fams = yield crud.fetch(engine) actual = set() for fam in fams: actual.add((fam['owner_id'], fam['name'], fam['family_id'])) expected = set([ (None, 'Jones', 1), (-1, 'James', 20), (None, 'Jones', 20), (None, 'James', 1), ]) self.assertEqual(actual, expected, "Should only change the one thing") @defer.inlineCallbacks def test_update_expression(self): """ You can filter the update by expression, too. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones', 'location': 'anvilania'}) yield crud.create(engine, {'surname': 'James', 'location': 'gotham'}) fams = yield crud.update(engine, {'location': 'middle earth'}, families.c.surname == 'James') self.assertEqual(len(fams), 1) fams = yield crud.fetch(engine, families.c.surname == u'Jones') self.assertEqual(fams[0]['location'], 'anvilania') fams = yield crud.fetch(engine, families.c.surname == u'James') self.assertEqual(fams[0]['location'], 'middle earth') @defer.inlineCallbacks def test_update_sanitize(self): """ A sanitizer should be used to sanitize fields on update. 
""" engine = yield self.engine() called = [] class Foo(object): sanitizer = Sanitizer(families) @sanitizer.sanitizeData def sani(self, context, data): called.append(context) return {'surname': 'Jones'} crud = Crud(Readset(families), Foo().sanitizer) family = yield crud.create(engine, {}) called.pop() fams = yield crud.update(engine, {'surname': 'Arnold'}, families.c.id==family['id']) self.assertEqual(fams[0]['surname'], 'Jones') self.assertEqual(called[0].action, 'update') self.assertEqual( str(called[0].query), str(families.select().where(families.c.id==family['id']))) @defer.inlineCallbacks def test_update_fixed_sanitizeQuery(self): """ Fixed attributes should make their way into the query passed to the sanitizer. """ engine = yield self.engine() called = [] class Foo(object): sanitizer = Sanitizer(families) @sanitizer.sanitizeData def sani(self, context, data): called.append(context) return data crud = Crud(Readset(families), Foo().sanitizer).fix( {'surname': 'Arnold'}) yield crud.create(engine, {}) called.pop() fams = yield crud.update(engine, {'location': 'Bolivia'}) self.assertEqual(fams[0]['location'], 'Bolivia') self.assertEqual(called[0].action, 'update') self.assertEqual( str(called[0].query), str(families.select().where(families.c.surname=='Arnold'))) @defer.inlineCallbacks def test_delete(self): """ You can delete sets of things. """ engine = yield self.engine() crud = Crud(Readset(families)) yield crud.create(engine, {'surname': 'Jones'}) yield crud.delete(engine) fams = yield crud.fetch(engine, ) self.assertEqual(len(fams), 0) @defer.inlineCallbacks def test_delete_fixed(self): """ The fixed variables influence what is deleted. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones'}) crud2 = crud.fix({'surname': 'Arnold'}) yield crud2.create(engine, {}) yield crud2.delete(engine) fams = yield crud.fetch(engine, ) self.assertEqual(len(fams), 1, "Should have only deleted the fixed") self.assertEqual(fams[0]['surname'], 'Jones') @defer.inlineCallbacks def test_delete_expression(self): """ You can filter by expression. """ engine = yield self.engine() crud = Crud(Readset(families), Sanitizer(families)) yield crud.create(engine, {'surname': 'Jones'}) yield crud.create(engine, {'surname': 'Arnold'}) yield crud.delete(engine, families.c.surname == 'Arnold') fams = yield crud.fetch(engine, ) self.assertEqual(len(fams), 1, "Should have deleted Arnold") @defer.inlineCallbacks def test_references_null(self): """ You can nest referenced tables when fetching. They will be None if there is no row. """ engine = yield self.engine() crud = Crud(Readset(people, references={ 'family': Ref(Readset(families), people.c.family_id == families.c.id), }), Sanitizer(people)) yield crud.create(engine, {'name': 'Sam'}) peeps = yield crud.fetch(engine, ) self.assertEqual(len(peeps), 1) sam = peeps[0] self.assertEqual(sam['family'], None, str(sam)) self.assertEqual(sam['name'], 'Sam') @defer.inlineCallbacks def test_references_notNull(self): """ You can nest objects by reference. 
""" engine = yield self.engine() fam_crud = Crud(Readset(families), Sanitizer(families)) family = yield fam_crud.create(engine, {'surname': 'Jones'}) crud = Crud(Readset(people, references={ 'family': Ref(Readset(families), people.c.family_id == families.c.id), }), Sanitizer(people)) sam = yield crud.create(engine, {'name': 'Sam', 'family_id': family['id']}) self.assertEqual(sam['family'], family) @defer.inlineCallbacks def test_references_multiple(self): """ You can have multiple references. """ engine = yield self.engine() fam_crud = Crud(Readset(families), Sanitizer(families)) johnson = yield fam_crud.create(engine, {'surname': 'Johnson'}) person_crud = Crud(Readset(people), Sanitizer(people)) john = yield person_crud.create(engine, { 'family_id': johnson['id'], 'name': 'John', }) pets_crud = Crud(Readset(pets, references={ 'family': Ref(Readset(families), pets.c.family_id == families.c.id), 'owner': Ref(Readset(people), pets.c.owner_id == people.c.id), }), Sanitizer(pets)) cat = yield pets_crud.create(engine, { 'family_id': johnson['id'], 'name': 'cat', 'owner_id': john['id'], }) self.assertEqual(cat['name'], 'cat') self.assertEqual(cat['family'], johnson) self.assertEqual(cat['owner'], john) dog = yield pets_crud.create(engine, { 'name': 'dog', 'owner_id': john['id'] }) self.assertEqual(dog['name'], 'dog') self.assertEqual(dog['owner'], john) self.assertEqual(dog['family'], None) fish = yield pets_crud.create(engine, { 'name': 'bob', 'family_id': johnson['id'], }) self.assertEqual(fish['name'], 'bob') self.assertEqual(fish['owner'], None) self.assertEqual(fish['family'], johnson) @defer.inlineCallbacks def test_references_list(self): """ You can reference a list of things. """ engine = yield self.engine() pet_crud = Crud(Readset(pets), Sanitizer(pets)) fam_crud = Crud(Readset(families, references={ 'pets': Ref(Readset(pets), pets.c.family_id == families.c.id, multiple=True), }), Sanitizer(families)) johnson = yield fam_crud.create(engine, {'surname': 'Johnson'}) self.assertEqual(johnson['pets'], []) cat = yield pet_crud.create(engine, { 'family_id': johnson['id'], 'name': 'cat'}) johnson_crud = fam_crud.fix({'id': johnson['id']}) johnson = yield johnson_crud.getOne(engine) self.assertEqual(johnson['pets'], [cat]) dog = yield pet_crud.create(engine, { 'family_id': johnson['id'], 'name': 'dog'}) johnson = yield johnson_crud.getOne(engine) self.assertIn(dog, johnson['pets']) self.assertIn(cat, johnson['pets']) @defer.inlineCallbacks def test_table_attr(self): """ You can expose the table names as an attribute. """ engine = yield self.engine() crud = Crud(Readset(families), table_attr='_object') r = yield crud.create(engine, {'surname': 'Jones'}) self.assertEqual(r['_object'], 'family') rlist = yield crud.fetch(engine) self.assertEqual(rlist[0]['_object'], 'family') rlist = yield crud.update(engine, {'surname': 'Jamison'}) self.assertEqual(rlist[0]['_object'], 'family') @defer.inlineCallbacks def test_table_attr_reference(self): """ table attr works with references, too. 
""" engine = yield self.engine() fam_crud = Crud(Readset(families)) family = yield fam_crud.create(engine, {'surname': 'Jones'}) crud = Crud(Readset(people, references={ 'family': Ref(Readset(families), people.c.family_id == families.c.id), }), Sanitizer(people), table_attr='foo') sam = yield crud.create(engine, {'name': 'Sam', 'family_id': family['id']}) self.assertEqual(sam['foo'], 'people') self.assertEqual(sam['family']['foo'], 'family') @defer.inlineCallbacks def test_table_map(self): """ You can map table names to something else. """ engine = yield self.engine() fam_crud = Crud(Readset(families), Sanitizer(families)) family = yield fam_crud.create(engine, {'surname': 'Jones'}) crud = Crud( Readset(people, references={ 'family': Ref(Readset(families), people.c.family_id == families.c.id), }), Sanitizer(people), table_attr='foo', table_map={ people: 'Person', families: 'Aardvark', }, ) sam = yield crud.create(engine, {'name': 'Sam', 'family_id': family['id']}) self.assertEqual(sam['foo'], 'Person') self.assertEqual(sam['family']['foo'], 'Aardvark') def test_table_map_attr_fix(self): """ Fixed Cruds should retain the table_attr and map. """ crud = Crud( Readset(families), table_attr='foo', table_map={'foo': 'bar'}, ) fixed = crud.fix({'id': 56}) self.assertEqual(fixed.table_attr, 'foo') self.assertEqual(fixed.table_map, {'foo': 'bar'}) class ReadsetTest(TestCase): def test_default(self): """ By default, all columns are read. """ r = Readset(families) self.assertEqual(r.readable, set(['id', 'location', 'surname'])) self.assertEqual(r.readable_columns, list(families.columns)) self.assertEqual(r.references, {}) def test_readable(self): """ You can specify a list of columns that are readable. """ r = Readset(families, ['location']) self.assertEqual(r.readable, set(['location'])) self.assertEqual(r.readable_columns, [families.c.location]) def test_references(self): """ You can specify a mapping of references. """ ref = Ref(Readset(families), people.c.family_id == families.c.id) r = Readset(people, references={'family': ref}) self.assertEqual(r.references, {'family': ref}) class WritesetTest(TestCase): def test_default(self): """ By default, no fields are writeable. """ w = Writeset(families) self.assertEqual(w.writeable, set([])) data = {'foo': 'bar', 'surname': 'Jones'} output = self.successResultOf(w.sanitize( SanitizationContext(None, 'create', None), data)) self.assertEqual(output, {}, "Should filter out all fields") output = self.successResultOf(w.sanitize( SanitizationContext(None, 'update', None), data)) self.assertEqual(output, {}, "Should filter out all fields") @defer.inlineCallbacks def test_writeable(self): """ Setting a list of writeable fields will let fields pass through. """ w = Writeset(families, ['surname']) self.assertEqual(w.writeable, set(['surname'])) data = {'foo': 'bar', 'surname': 'Jones', 'location': 'Nowhere'} output = yield w.sanitize( SanitizationContext(None, 'update', None), data) self.assertEqual(output, {'surname': 'Jones'}, "Surname is writeable") output = yield w.sanitize( SanitizationContext(None, 'create', None), data) self.assertEqual(output, {'surname': 'Jones'}, "Surname is writeable") def test_writeable_asColumns(self): """ You can specify the writeable fields as SQLAlchemy columns. """ w = Writeset(families, [families.c.surname]) self.assertEqual(w.writeable, set(['surname'])) @defer.inlineCallbacks def test_create_writeable(self): """ Setting a list of create_writeable fields will only let the fields pass through when creating. 
""" w = Writeset(families, writeable=['location'], create_writeable=['surname']) data = {'foo': 'bar', 'surname': 'Jones', 'location': 'Nowhere'} output = yield w.sanitize( SanitizationContext(None, 'create', None), data) self.assertEqual(output, {'surname': 'Jones', 'location': 'Nowhere'}, "Writeable and create writeable are writeable during create") output = yield w.sanitize( SanitizationContext(None, 'update', None), data) self.assertEqual(output, {'location': 'Nowhere'}, "Only writeable is writeable during update") class PaginatorTest(TestCase): timeout = 10 @defer.inlineCallbacks def engine(self): engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, reactor=reactor, strategy=TWISTED_STRATEGY, poolclass=StaticPool) yield engine.execute(CreateTable(families)) yield engine.execute(CreateTable(people)) yield engine.execute(CreateTable(pets)) defer.returnValue(engine) @defer.inlineCallbacks def test_page(self): """ You can paginate a Crud """ engine = yield self.engine() crud = Crud(Readset(pets)) pager = Paginator(crud, page_size=10, order=pets.c.id) monkeys = [] for i in xrange(40): monkey = yield crud.create(engine, {'name': 'seamonkey %d' % (i,)}) monkeys.append(monkey) page1 = yield pager.page(engine, 0) self.assertEqual(page1, monkeys[:10]) page2 = yield pager.page(engine, 1) self.assertEqual(page2, monkeys[10:20]) @defer.inlineCallbacks def test_page_where(self): """ You can paginate filtered results, too """ engine = yield self.engine() crud = Crud(Readset(pets), Sanitizer(pets)) pager = Paginator(crud, page_size=3, order=pets.c.id) things = [] _things = [ {'name': 'thing 1'}, {'name': 'thing 2'}, {'name': 'dog'}, {'name': 'cat'}, {'name': 'dog'}, ] for thing in _things: t = yield crud.create(engine, thing) things.append(t) page1 = yield pager.page(engine, 0, pets.c.name.startswith('thing')) self.assertEqual(page1, [things[0], things[1]]) count = yield pager.pageCount(engine, pets.c.name.startswith('thing')) self.assertEqual(count, 1) @defer.inlineCallbacks def test_pageCount(self): """ You can count the pages """ engine = yield self.engine() crud = Crud(Readset(pets)) pager = Paginator(crud, page_size=10, order=pets.c.id) monkeys = [] for i in xrange(43): monkey = yield crud.create(engine, {'name': 'seamonkey %d' % (i,)}) monkeys.append(monkey) pages = yield pager.pageCount(engine) self.assertEqual(pages, 5) @defer.inlineCallbacks def test_pageCountForills(self): """ The page count should be accurate for all numbers. """ engine = yield self.engine() crud = Crud(Readset(pets), Sanitizer(pets)) pager = Paginator(crud, page_size=3, order=pets.c.id) count = yield pager.pageCount(engine) self.assertEqual(count, 0, "no records, no pages") yield crud.create(engine, {}) count = yield pager.pageCount(engine) self.assertEqual(count, 1, "1 record, 1 page") yield crud.create(engine, {}) yield crud.create(engine, {}) count = yield pager.pageCount(engine) self.assertEqual(count, 1, "3 records, 1 page") yield crud.create(engine, {}) count = yield pager.pageCount(engine) self.assertEqual(count, 2, "4 records, 2 pages") class SanitizerTest(TestCase): create_context = SanitizationContext(None, 'create', None) update_context = SanitizationContext(None, 'update', None) @defer.inlineCallbacks def test_default(self): """ An empty sanitizer will forbid any column from being updated. 
""" class Foo(object): sanitizer = Sanitizer(pets) sanitizer = Foo().sanitizer data = { 'foo': 'bar', 'id': 12, 'family_id': 19, 'owner_id': -1, 'name': 'bob', } output = yield sanitizer.sanitize(self.create_context, data) self.assertEqual(output, { 'id': 12, 'family_id': 19, 'owner_id': -1, 'name': 'bob', }, "Should pass-thru on every legitimate column by default") @defer.inlineCallbacks def test_required(self): """ You can specify a list of fields that are required on create. """ sanitizer = Sanitizer(pets, required=['name']) self.assertEqual(set(sanitizer.required), set(['name'])) yield self.assertFailure(sanitizer.sanitize(self.create_context, {}), MissingRequiredFields) yield self.assertFailure(sanitizer.sanitize(self.create_context, {'name': None}), MissingRequiredFields) output = yield sanitizer.sanitize(self.create_context, {'name': 'bob'}) self.assertEqual(output, {'name': 'bob'}) @defer.inlineCallbacks def test_required_update(self): """ Required on create validation does not happen on update except on null-checking. """ sanitizer = Sanitizer(pets, required=['name']) self.assertEqual(set(sanitizer.required), set(['name'])) output = yield sanitizer.sanitize(self.update_context, {}) self.assertEqual(output, {}) yield self.assertFailure(sanitizer.sanitize(self.update_context, {'name': None}), MissingRequiredFields) @defer.inlineCallbacks def test_sanitizeData(self): """ You can specify a function that will sanitize the whole piece of data. """ called = {} class Foo(object): sanitizer = Sanitizer(pets) @sanitizer.sanitizeData def myFunc(self, context, data): called['context'] = context called['data'] = data return {'name': 'john'} sanitizer = Foo().sanitizer indata = { 'foo': 'bar', 'name': 'bob', } output = yield sanitizer.sanitize(self.create_context, indata) self.assertEqual(output, {'name': 'john'}) self.assertEqual(called['context'], self.create_context) self.assertEqual(called['data'], indata) @defer.inlineCallbacks def test_sanitizeField(self): """ You can sanitize individual fields. """ called = {} class Foo(object): sanitizer = Sanitizer(pets) @sanitizer.sanitizeField('name') def name(self, context, data, field): called['context'] = context called['data'] = data.copy() called['field'] = field return 'new name' sanitizer = Foo().sanitizer indata = {'name': 'sam'} output = yield sanitizer.sanitize(self.create_context, indata) self.assertEqual(output, {'name': 'new name'}) self.assertEqual(called['context'], self.create_context) self.assertEqual(called['data'], {'name': 'sam'}) self.assertEqual(called['field'], 'name') @defer.inlineCallbacks def test_sanitizeField_deferred(self): """ Field sanitizers can return deferreds. """ class Foo(object): sanitizer = Sanitizer(pets) @sanitizer.sanitizeField('name') def name(self, context, data, field): return defer.succeed('new name') sanitizer = Foo().sanitizer indata = {'name': 'sam'} output = yield sanitizer.sanitize(self.create_context, indata) self.assertEqual(output, {'name': 'new name'}) @defer.inlineCallbacks def test_sanitizeField_order(self): """ Fields are sanitized in the order added. 
""" called = [] class Foo(object): sanitizer = Sanitizer(pets) @sanitizer.sanitizeField('name') def name(self, context, data, field): called.append('name') return data[field] @sanitizer.sanitizeField('family_id') def family_id(self, context, data, field): called.append('family_id') return data[field] sanitizer = Foo().sanitizer indata = {'name': 'sam', 'family_id': 12} output = yield sanitizer.sanitize(self.create_context, indata) self.assertEqual(output, {'name': 'sam', 'family_id': 12}) self.assertEqual(called, ['name', 'family_id'], "Should be called " "in the order added") @defer.inlineCallbacks def test_sanitizeField_onlyCalledIfPresent(self): """ The sanitizeField sanitizers should only be called if the field is present in the update/create data. """ called = [] class Foo(object): sanitizer = Sanitizer(pets) @sanitizer.sanitizeField('name') def name(self, context, data, field): called.append('name') return data[field] sanitizer = Foo().sanitizer indata = {'family_id': 12} output = yield sanitizer.sanitize(self.create_context, indata) self.assertEqual(output, {'family_id': 12}) self.assertEqual(called, [], "Should not call name validator since " "name wasn't present") def test_getSanitizedFields(self): """ You can list the fields that are being sanitized. """ class Foo(object): sanitizer = Sanitizer(pets) @sanitizer.sanitizeField('name') def name(self, context, data, field): pass self.assertEqual(Foo.sanitizer.getSanitizedFields(), ['name']) class SaniChainTest(TestCase): @defer.inlineCallbacks def test_calls_all(self): """ The chain will pass the data through each item in the sanitization chain. """ sani1 = MagicMock() sani1.table = 'foo' sani1.sanitize.return_value = {'foo': 'bar'} sani2 = MagicMock() sani2.table = 'foo' sani2.sanitize.return_value = defer.succeed({'hey': 'ho'}) chain = SaniChain([sani1, sani2]) self.assertEqual(chain.table, 'foo') data = {'1': '2'} context = SanitizationContext(None, None, None) output = yield chain.sanitize(context, data) sani1.sanitize.assert_called_once_with(context, data) sani2.sanitize.assert_called_once_with(context, {'foo': 'bar'}) self.assertEqual(output, {'hey': 'ho'}) def test_differentTable(self): """ Sanitizers must have the same table. """ sani1 = MagicMock() sani1.table = 'foo' sani2 = MagicMock() sani2.table = 'bar' self.assertRaises(Exception, SaniChain, [sani1, sani2]) class crudFromSpecTest(TestCase): def assertWriteable(self, crud, expected): """ Assert that the given fields are the complete set of writeable fields. """ dummy = {} for c in crud.readset.table.columns: dummy[c.name] = 'dummy' d = crud.sanitizer.sanitize( SanitizationContext(None, None, None), dummy) output = self.successResultOf(d) self.assertEqual(set(output.keys()), set(expected), "Expected these fields to be writeable: %r" % (expected,)) return output def assertCreateWriteable(self, crud, expected): """ Assert that the given fields are the complete set of writeable fields when creating and that none of them are writeable when updating. 
""" dummy = {} for c in crud.readset.table.columns: dummy[c.name] = 'dummy' d = crud.sanitizer.sanitize( SanitizationContext(None, 'create', None), dummy) output = self.successResultOf(d) self.assertEqual(set(output), set(expected), "Expected these fields to be writeable during create: %r" % ( expected,)) d = crud.sanitizer.sanitize( SanitizationContext(None, 'update', None), dummy) output = self.successResultOf(d) writeable = set(output) & set(expected) self.assertEqual(writeable, set(), "Expected fields not to be writeable during update: %r" % ( writeable)) return output def test_table_attr(self): """ You can set the table_attr. """ class Base: table = families crud = crudFromSpec(Base, table_attr='foo', table_map={'foo':'bar'}) self.assertEqual(crud.table_attr, 'foo') self.assertEqual(crud.table_map, {'foo':'bar'}) def test_defaults(self): """ By default, all columns are readable and all are writeable """ class Base: table = families crud = crudFromSpec(Base) self.assertEqual(crud.table_attr, None) self.assertEqual(crud.table_map, {}) # readset self.assertTrue(isinstance(crud.readset, Readset)) self.assertEqual(crud.readset.table, families) self.assertEqual(crud.readset.readable_columns, list(families.columns), "All columns should be readable by default") self.assertEqual(crud.readset.references, {}) # sanitizer self.assertEqual(crud.sanitizer.table, families) self.assertWriteable(crud, []) def test_readable(self): """ You can specify the list of readable columns. """ class Base: table = families readable = [ 'id', ] crud = crudFromSpec(Base) self.assertEqual(crud.readset.readable, set(['id'])) self.assertWriteable(crud, []) def test_writeable(self): """ You can specify the list of writeable columns. """ class Base: table = families writeable = [ 'id', ] crud = crudFromSpec(Base) self.assertEqual(crud.readset.readable_columns, list(families.columns)) self.assertWriteable(crud, ['id']) def test_writeable_all(self): """ You can say that all fields are writeable. """ class Base: table = families writeable = 'ALL' crud = crudFromSpec(Base) self.assertEqual(crud.readset.readable_columns, list(families.columns)) self.assertWriteable(crud, [x.name for x in families.columns]) def test_create_writeable(self): """ You can specify that a set of fields are writeable only when creating. """ class Base: table = families create_writeable = [ 'surname', ] crud = crudFromSpec(Base) self.assertCreateWriteable(crud, ['surname']) def test_references(self): """ You can specify a hash of references. """ ref = Ref(Readset(people), families.c.id == people.c.family_id) class Base: table = families references = { 'foo': ref, } crud = crudFromSpec(Base) self.assertEqual(crud.readset.references['foo'], ref) def test_sanitizer(self): """ You can specify a sanitizer """ class Base: table = families writeable = ['surname'] sanitizer = Sanitizer(table) @sanitizer.sanitizeField('surname') def surname(self, context, data, fieldname): return 'sanitized surname' crud = crudFromSpec(Base) output = self.assertWriteable(crud, ['surname']) self.assertEqual(output['surname'], 'sanitized surname')
""" Test the Command state """ import os from sermin import File, AppendParser, IniParser from sermin.utils import shell from .utils import FullTestCase class FileMixin(object): # Path to use for file test path = '/tmp/sermin_test' path_src = '/tmp/sermin_test_src' def clean(self): """ Ensure file is not on the system """ shell('rm -rf {}'.format(self.path)) shell('rm -rf {}'.format(self.path_src)) def read(self): with open(self.path) as file: content = file.readlines() return content class FileTest(FileMixin, FullTestCase): def test_content_creates(self): File(self.path, content='Test content') self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test content') def test_content_copies(self): File(self.path_src, content='Test content') File(self.path, source=self.path_src) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test content') def test_absent_deletes(self): shell('touch {}'.format(self.path)) self.assertTrue(os.path.exists(self.path)) File(self.path, state=File.ABSENT) self.registry_run() self.assertFalse(os.path.exists(self.path)) def test_content_context(self): File(self.path, content='Test {{ arg }} complete', context={ 'arg': 'text_content_context', }) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test text_content_context complete') def test_source_context(self): File(self.path_src, content='Test {{ arg }} complete') File(self.path, source=self.path_src, context={ 'arg': 'test_source_context', }) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test test_source_context complete') def test_invalid_state_raises(self): with self.assertRaisesRegexp(ValueError, r'^Invalid state$'): File(self.path, state='invalid') def test_absent_with_values_raises(self): for arg in ['content', 'source', 'set', 'delete', 'context']: with self.assertRaisesRegexp( ValueError, r'^A File defined in state ABSENT cannot take other arguments$' ): File(self.path, state=File.ABSENT, **{arg: True}) def test_set_delete_missing_parser_raises(self): for arg in ['set', 'delete']: with self.assertRaisesRegexp( ValueError, r'^A File needs a parser to set or delete$' ): File(self.path, **{arg: True}) def test_content_and_source_raises(self): with self.assertRaisesRegexp( ValueError, r'^A File can only be defined with a content or source, not both$' ): File(self.path, content='fail', source='fail') def test_context_missing_content_or_source_raises(self): with self.assertRaisesRegexp( ValueError, r'^A File can only apply a context to a content or source$' ): File(self.path, context={}) class FileAppendTest(FileMixin, FullTestCase): def test_set__new_line__new_file(self): File(self.path, parser=AppendParser, set=['Test content']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test content') def test_set__new_line__existing_file(self): File(self.path, content='Test content 1') File(self.path, parser=AppendParser, set=['Test content 2']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 2) self.assertEqual(content[0], 'Test content 1\n') 
self.assertEqual(content[1], 'Test content 2') def test_set__existing_line(self): File(self.path, content='Test content 1\nTest content 2') File(self.path, parser=AppendParser, set=['Test content 2']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 2) self.assertEqual(content[0], 'Test content 1\n') self.assertEqual(content[1], 'Test content 2') def test_delete__existing_line(self): File(self.path, content='Test content 1\nTest content 2') File(self.path, parser=AppendParser, delete=['Test content 2']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test content 1') def test_delete__missing_line__existing_file(self): File(self.path, content='Test content 1') File(self.path, parser=AppendParser, delete=['Test content 2']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 1) self.assertEqual(content[0], 'Test content 1') class FileIniTest(FileMixin, FullTestCase): def test_set__new_section__new_option__new_file(self): File(self.path, parser=IniParser, set={ ('Test section', 'Test option'): 'Test value', }) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 3) self.assertEqual(content[0], '[Test section]\n') self.assertEqual(content[1], 'test option = Test value\n') self.assertEqual(content[2], '\n') def test_set__new_section__new_option__existing_file(self): File(self.path, content='[Section 1]\noption 1 = Value 1\n') File(self.path, parser=IniParser, set={ ('Section 2', 'Option 2'): 'Value 2', }) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 6) self.assertEqual(content[0], '[Section 1]\n') self.assertEqual(content[1], 'option 1 = Value 1\n') self.assertEqual(content[2], '\n') self.assertEqual(content[3], '[Section 2]\n') self.assertEqual(content[4], 'option 2 = Value 2\n') self.assertEqual(content[5], '\n') def test_set__existing_section__new_option(self): File(self.path, content='[Section 1]\noption 1 = Value 1\n') File(self.path, parser=IniParser, set={ ('Section 1', 'Option 2'): 'Value 2', }) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 4) self.assertEqual(content[0], '[Section 1]\n') self.assertEqual(content[1], 'option 1 = Value 1\n') self.assertEqual(content[2], 'option 2 = Value 2\n') self.assertEqual(content[3], '\n') def test_set__existing_option(self): File(self.path, content='[Section 1]\noption 1 = Value 1\n') File(self.path, parser=IniParser, set={ ('Section 1', 'Option 1'): 'Value 2', }) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 3) self.assertEqual(content[0], '[Section 1]\n') self.assertEqual(content[1], 'option 1 = Value 2\n') self.assertEqual(content[2], '\n') def test_delete__existing_option__other_options(self): File(self.path, content=( '[Section 1]\n' 'option 1 = Value 1\n' 'option 2 = Value 2\n' )) File(self.path, parser=IniParser, delete=[('Section 1', 'option 1')]) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 3) self.assertEqual(content[0], '[Section 1]\n') self.assertEqual(content[1], 'option 2 = Value 2\n') self.assertEqual(content[2], '\n') def 
test_delete__existing_option__only_option(self): File(self.path, content='[Section 1]\noption 1 = Value 1\n') File(self.path, parser=IniParser, delete=[('Section 1', 'option 1')]) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 2) self.assertEqual(content[0], '[Section 1]\n') self.assertEqual(content[1], '\n') def test_delete__existing_section_with_options(self): File(self.path, content='[Section 1]\noption 1 = Value 1\n') File(self.path, parser=IniParser, delete=['Section 1']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 0) def test_delete__existing_section_empty(self): File(self.path, content='[Section 1]\n') File(self.path, parser=IniParser, delete=['Section 1']) self.registry_run() self.assertTrue(os.path.exists(self.path)) content = self.read() self.assertEqual(len(content), 0)
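
# Illustrative usage of the File resource outside the test harness (paths and
# values are examples only). Declarations register the desired state, and a
# registry run applies them, as self.registry_run() does in the tests above.
def _example_declarations():  # illustrative only, never invoked by the tests
    File('/etc/myapp/settings.ini', parser=IniParser, set={
        ('server', 'port'): '8080',
    })
    File('/etc/myapp/settings.ini', parser=IniParser, delete=[
        ('server', 'debug'),
    ])
    File('/var/run/myapp.lock', state=File.ABSENT)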
import json from django.test import TestCase from django.test import Client from local_settings import userdata_storage_path class FileManagerViewTests(TestCase): """ Tests for the Index app views """ fixtures = ['seed.json'] def test_get_data_set_list(self): """ Test '/file_manager/get_data_set_list/' """ expected_result = [{ "id": 47, "file_list": ['query1.txt', 'query2.txt', 'test_query.sh'], "allowed_access": ["test_user"], "name": "test_user_data", "owner": "test_user", "metadata": "" }] client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/get_data_set_list/') self.assertEqual(res.status_code, 200) data = json.loads(res.content) print data self.assertTrue(data == expected_result) def test_get_data_set_list_invalid(self): """ Test that get_data_set_list returns status=401 when unauthenicated """ client = Client() res = client.get('/file_manager/get_data_set_list/') self.assertEqual(res.status_code, 401) def test_get_data_set(self): """ test '/file_manager/get_data_set/' """ expected_result = { "id": 47, "file_list": ['query1.txt', 'query2.txt', 'test_query.sh'], "allowed_access": ["test_user"], "owner": "test_user", "name": "test_user_data", "metadata": "" } client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/get_data_set/', {'data_set_name': 'test_user_data'}) self.assertEqual(res.status_code, 200) data = json.loads(res.content) print data, expected_result self.assertTrue(data == expected_result) def test_get_data_set_fake_data_set(self): """ test '/file_manager/get_data_set/' """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/get_data_set/', {'data_set_name': 'DOES_NOT_EXIST'}) self.assertEqual(res.status_code, 200) data = json.loads(res.content) self.assertTrue(data == {}) def test_get_data_set_invalid(self): """ test file_manager.get_data_set return status=401 when unauthenticated """ client = Client() res = client.get('/file_manager/get_data_set/') self.assertEqual(res.status_code, 401) def test_get_data_set_bad_set(self): """ test file_manager.get_data_set returns status=402 when asking for a bad dataset """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/get_data_set/', {}) self.assertEqual(res.status_code, 402) def test_get_file_info(self): """ test file_manager.get_file_info """ expected_result = { "id": 93, 'owner': 'test_user', 'path': '/Users/baldwin32/projects/acme_workbench/userdata/test_user_data_test_user/query1.txt', 'display_name': 'query1.txt', 'data_type': 2, 'allowed_access': ['test_user'] } client = Client() client.login( username='test_user', password='qwertyuiop') params = { 'data_set_name': 'test_user_data', 'data_file_name': 'query1.txt' } res = client.get('/file_manager/get_file_info/', params) self.assertEqual(res.status_code, 200) data = json.loads(res.content) self.assertTrue(data == expected_result) def test_get_file_info_fake_file(self): """ test file_manager.get_file_info """ client = Client() client.login( username='test_user', password='qwertyuiop') params = { 'data_set_name': 'test_user_data', 'data_file_name': 'DOES_NOT_EXIST' } res = client.get('/file_manager/get_file_info/', params) self.assertEqual(res.status_code, 200) data = json.loads(res.content) self.assertTrue(data == {}) def test_get_file_info_unauthenticated(self): """ test file_manager.get_file_info return 401 with unauthenticated request """ client = 
Client() res = client.get('/file_manager/get_file_info/') self.assertEqual(res.status_code, 401) def test_get_file_info_no_dataset(self): """ test file_manager.get_file_info """ client = Client() client.login( username='test_user', password='qwertyuiop') params = { 'data_set_name': 'DOES_NOT_EXIST', 'data_file_name': 'query1.txt' } res = client.get('/file_manager/get_file_info/', params) self.assertEqual(res.status_code, 200) data = json.loads(res.content) self.assertTrue(data == {}) def test_get_file_info_bad_request(self): """ Test that file_manager.get_file_info returns 402 on bad request """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/get_file_info/', {}) self.assertEqual(res.status_code, 402) def test_upload_dataset_wrong_method(self): """ test file_manager.upload_dataset responds with status=401 without a POST request """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/upload_dataset/TEST') self.assertEqual(res.status_code, 401) def test_upload_dataset_unauthenticated(self): """ test file_manager.upload_dataset responds with status=401 without an authenticated user """ client = Client() res = client.post('/file_manager/upload_dataset/TEST') self.assertEqual(res.status_code, 401) def test_upload_dataset(self): """ test file_manager.upload_dataset accepts valid file upload """ client = Client() client.login( username='test_user', password='qwertyuiop') dataset_name = 'new_test_dataset' url = '/file_manager/upload_dataset/' + dataset_name with open('seed.json', 'r') as filepointer: res = client.post(url, {'file': filepointer}) self.assertEqual(res.status_code, 200) if res.status_code == 200: res = client.delete('/file_manager/delete_dataset/' + dataset_name) def test_delete_dataset_unauthenticated(self): """ test file_manager.delete_dataset rejects unauthenticated requests """ client = Client() res = client.delete('/file_manager/delete_dataset/TEST') self.assertEqual(res.status_code, 403) def test_delete_dataset_wrong_method(self): """ test file_manager.delete_dataset rejects unauthenticated requests """ client = Client() res = client.get('/file_manager/delete_dataset/TEST') self.assertEqual(res.status_code, 403) def test_delete_dataset_no_dataset(self): """ test file_manager.delete_dataset rejects unauthenticated requests """ client = Client() res = client.get('/file_manager/delete_dataset/') self.assertEqual(res.status_code, 404) def test_delete_dataset_bad_dataset(self): """ test file_manager.delete_dataset rejects unauthenticated requests """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.delete('/file_manager/delete_dataset/TEST') self.assertEqual(res.status_code, 404) def test_change_permissions_unauth(self): """ test file_manager.change_file_permissions returns status=401 on unauthenticated requests """ client = Client() res = client.get('/file_manager/change_file_permissions/') self.assertEqual(res.status_code, 401) def test_change_permissions_bad_method(self): """ test file_manager.change_file_permissions returns status=401 on unauthenticated requests """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.get('/file_manager/change_file_permissions/') self.assertEqual(res.status_code, 401) def test_change_permissions_bad_params(self): """ test file_manager.change_file_permissions returns status=401 with no params """ client = Client() client.login( username='test_user', 
password='qwertyuiop') res = client.post('/file_manager/change_file_permissions/', {}) self.assertEqual(res.status_code, 401) def test_change_permissions_bad_params_2(self): """ test file_manager.change_file_permissions returns status=401 with bad params """ client = Client() client.login( username='test_user', password='qwertyuiop') res = client.post( '/file_manager/change_file_permissions/', data=json.dumps({'user_list': []}), content_type="application/json") self.assertEqual(res.status_code, 401) def test_change_permissions_bad_file_id(self): """ test file_manager.change_file_permissions returns status=401 with bad params """ client = Client() client.login( username='test_user', password='qwertyuiop') params = { 'user_list': ['baldwin32'], 'file': 999 } res = client.post( '/file_manager/change_file_permissions/', data=json.dumps(params), content_type="application/json") self.assertEqual(res.status_code, 401) def test_change_permissions_bad_user(self): """ test file_manager.change_file_permissions returns status=401 with bad params """ client = Client() client.login( username='test_user', password='qwertyuiop') params = { 'user_list': ['baldwin32'], 'file': 90 } res = client.post( '/file_manager/change_file_permissions/', data=json.dumps(params), content_type="application/json") self.assertEqual(res.status_code, 403) def test_change_permissions_valid_add_and_remove(self): """ test file_manager.change_file_permissions returns status=401 with bad params """ client = Client() client.login( username='test_user', password='qwertyuiop') params = { 'user_list': ['baldwin32'], 'file': 93 } res = client.post( '/file_manager/change_file_permissions/', data=json.dumps(params), content_type="application/json") self.assertEqual(res.status_code, 200) res = client.delete( '/file_manager/change_file_permissions/', data=json.dumps(params), content_type="application/json") self.assertEqual(res.status_code, 200)
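
# The tests above repeat the same login-then-request sequence; a small helper
# like this (hypothetical, not part of the application under test) would
# remove that duplication:
def logged_in_client(username='test_user', password='qwertyuiop'):
    """Return a Django test Client already logged in as the given user."""
    client = Client()
    client.login(username=username, password=password)
    return client

# Example:
#   res = logged_in_client().get('/file_manager/get_data_set_list/')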
# -*- coding: utf-8 -*- #------------------------------------------------------------------------------- # Name: zosutils.py # Purpose: Utilities for pyzos # Licence: MIT License # This file is subject to the terms and conditions of the MIT License. # For further details, please refer to LICENSE.txt #------------------------------------------------------------------------------- from __future__ import division, print_function import sys as _sys from win32com.client import CastTo as _CastTo def get_callable_method_dict(obj): """Returns a dictionary of callable methods of object `obj`. @param obj: ZOS API Python COM object @return: a dictionary of callable methods Notes: the function only returns the callable attributes that are listed by dir() function. Properties are not returned. """ methodDict = {} for methodStr in dir(obj): method = getattr(obj, methodStr, 'none') if callable(method) and not methodStr.startswith('_'): methodDict[methodStr] = method return methodDict def replicate_methods(srcObj, dstObj): """Replicate callable methods from a `srcObj` to `dstObj` (generally a wrapper object). @param srcObj: source object @param dstObj: destination object of the same type. @return : none Implementer notes: 1. Once the methods are mapped from the `srcObj` to the `dstObj`, the method calls will not get "routed" through `__getattr__` method (if implemented) in `type(dstObj)` class. 2. An example of what a 'key' and 'value' look like: key: MakeSequential value: <bound method IOpticalSystem.MakeSequential of <win32com.gen_py.ZOSAPI_Interfaces.IOpticalSystem instance at 0x77183968>> """ # prevent methods that we intend to specialize from being mapped. The specialized # (overridden) methods are methods with the same name as the corresponding method in # the source ZOS API COM object written for each ZOS API COM object in an associated # python script such as i_analyses_methods.py for I_Analyses overridden_methods = get_callable_method_dict(type(dstObj)).keys() #overridden_attrs = [each for each in type(dstObj).__dict__.keys() if not each.startswith('_')] # def zos_wrapper_deco(func): def wrapper(*args, **kwargs): return wrapped_zos_object(func(*args, **kwargs)) varnames = func.im_func.func_code.co_varnames # alternative is to use inspect.getargspec params = [par for par in varnames if par not in ('self', 'ret')] # removes 'self' and 'ret' wrapper.__doc__ = func.im_func.func_name + '(' + ', '.join(params) + ')' return wrapper # for key, value in get_callable_method_dict(srcObj).items(): if key not in overridden_methods: setattr(dstObj, key, zos_wrapper_deco(value)) def get_properties(zos_obj): """Returns a lists of properties bound to the object `zos_obj` @param zos_obj: ZOS API Python COM object @return prop_get: list of properties that are only getters @return prop_set: list of properties that are both getters and setters """ prop_get = set(zos_obj._prop_map_get_.keys()) prop_set = set(zos_obj._prop_map_put_.keys()) if prop_set.issubset(prop_get): prop_get = prop_get.difference(prop_set) else: msg = 'Assumption all getters are also setters is incorrect!' 
raise NotImplementedError(msg) return list(prop_get), list(prop_set) #%% class ZOSPropMapper(object): """Descriptor for mapping ZOS object properties to corresponding wrapper classes """ def __init__(self, zos_interface_attr, property_name, setter=False, cast_to=None): """ @param zos_interface_attr : attribute used to dispatch method/property calls to the zos_object (it hold the zos_object) @param propname : string, like 'SystemName' for IOpticalSystem @param setter : if False, a read-only data descriptor is created @param cast_to : Name of class (generally the base class) whose property to call """ self.property_name = property_name # property_name is a string like 'SystemName' for IOpticalSystem self.zos_interface_attr = zos_interface_attr self.setter = setter self.cast_to = cast_to def __get__(self, obj, objtype): if self.cast_to: return wrapped_zos_object(getattr(_CastTo(obj.__dict__[self.zos_interface_attr], self.cast_to), self.property_name)) else: return wrapped_zos_object(getattr(obj.__dict__[self.zos_interface_attr], self.property_name)) def __set__(self, obj, value): if self.setter: if self.cast_to: setattr(_CastTo(obj.__dict__[self.zos_interface_attr], self.cast_to), self.property_name, value) else: setattr(obj.__dict__[self.zos_interface_attr], self.property_name, value) else: raise AttributeError("Can't set {}".format(self.property_name)) def managed_wrapper_class_factory(zos_obj): """Creates and returns a wrapper class of a ZOS object, exposing the ZOS objects methods and propertis, and patching custom specialized attributes @param zos_obj: ZOS API Python COM object """ cls_name = repr(zos_obj).split()[0].split('.')[-1] dispatch_attr = '_' + cls_name.lower() # protocol to be followed to store the ZOS COM object cdict = {} # class dictionary # patch the properties of the base objects base_cls_list = inheritance_dict.get(cls_name, None) if base_cls_list: for base_cls_name in base_cls_list: getters, setters = get_properties(_CastTo(zos_obj, base_cls_name)) for each in getters: exec("p{} = ZOSPropMapper('{}', '{}', cast_to='{}')".format(each, dispatch_attr, each, base_cls_name), globals(), cdict) for each in setters: exec("p{} = ZOSPropMapper('{}', '{}', setter=True, cast_to='{}')".format(each, dispatch_attr, each, base_cls_name), globals(), cdict) # patch the property attributes of the given ZOS object getters, setters = get_properties(zos_obj) for each in getters: exec("p{} = ZOSPropMapper('{}', '{}')".format(each, dispatch_attr, each), globals(), cdict) for each in setters: exec("p{} = ZOSPropMapper('{}', '{}', setter=True)".format(each, dispatch_attr, each), globals(), cdict) def __init__(self, zos_obj): # dispatcher attribute cls_name = repr(zos_obj).split()[0].split('.')[-1] dispatch_attr = '_' + cls_name.lower() # protocol to be followed to store the ZOS COM object self.__dict__[dispatch_attr] = zos_obj self._dispatch_attr_value = dispatch_attr # used in __getattr__ # Store base class object self._base_cls_list = inheritance_dict.get(cls_name, None) # patch the methods of the base class(s) of the given ZOS object if self._base_cls_list: for base_cls_name in self._base_cls_list: replicate_methods(_CastTo(zos_obj, base_cls_name), self) # patch the methods of given ZOS object replicate_methods(zos_obj, self) # mark object as wrapped to prevent it from being wrapped subsequently self._wrapped = True # Provide a way to make property calls without the prefix p def __getattr__(self, attrname): return wrapped_zos_object(getattr(self.__dict__[self._dispatch_attr_value], attrname)) 
    def __repr__(self):
        if type(self).__name__ == 'IZOSAPI_Application':
            repr_str = "{.__name__}(NumberOfOpticalSystems = {})".format(type(self), self.pNumberOfOpticalSystems)
        else:
            repr_str = "{.__name__}".format(type(self))
        return repr_str

    cdict['__init__'] = __init__
    cdict['__getattr__'] = __getattr__
    cdict['__repr__'] = __repr__

    # patch custom methods from python files imported as modules
    module_import_str = """
try:
    from pyzos.zos_obj_override.{module:} import *
except ImportError:
    pass
""".format(module=cls_name.lower() + '_methods')
    exec(module_import_str, globals(), cdict)
    _ = cdict.pop('print_function', None)
    _ = cdict.pop('division', None)

    return type(cls_name, (), cdict)

def wrapped_zos_object(zos_obj):
    """Helper function to wrap ZOS API COM objects.

    @param zos_obj : ZOS API Python COM object
    @return: instance of the wrapped ZOS API class. If the input object is not
             a ZOS-API COM object or if it is already wrapped, then the object
             is returned without wrapping.

    Notes:
    The function dynamically creates a wrapped class with all the provided methods,
    properties, and custom methods monkey patched; and returns an instance of it.
    """
    if hasattr(zos_obj, '_wrapped') or ('CLSID' not in dir(zos_obj)):
        return zos_obj
    else:
        Class = managed_wrapper_class_factory(zos_obj)
        return Class(zos_obj)

#%% ZOS object inheritance relationships dictionary
# Unfortunately this dict is created manually following the ZOS-API documentation. There
# is no way to know this relationship by querying the pythoncom objects.
# Rules (and assumptions made by functions using this dict):
# 1. The base class hierarchy is encoded as lists (i.e. elements are ordered) in the value fields of the dict
# 2. The dict only contains those ZOS objects that have one or more parent classes, i.e. empty lists are not
#    allowed.
# 3.
The order of super classes in each list: [immediate-base-cls, next-level-base-cls, ..., top-most-base-cls] inheritance_dict = { ## IEditor Interface - base interface for all 5 editors 'ILensDataEditor' : ['IEditor',], 'IMultiConfigEditor' : ['IEditor',], 'IMeritFunctionEditor' : ['IEditor',], 'INonSeqEditor' : ['IEditor',], 'IToleranceDataEditor' : ['IEditor',], ## IAS_ Interface - base class for all analysis settings interfaces # Aberrations interface settings 'IAS_FieldCurvatureAndDistortion' : ['IAS_',], 'IAS_FocalShiftDiagram' : ['IAS_',], 'IAS_GridDistortion' : ['IAS_',], 'IAS_LateralColor' : ['IAS_',], 'IAS_LongitudinalAberration' : ['IAS_',], 'IAS_RayTrace' : ['IAS_',], 'IAS_SeidelCoefficients' : ['IAS_',], 'IAS_SeidelDiagram' : ['IAS_',], 'IAS_ZernikeAnnularCoefficients' : ['IAS_',], 'IAS_ZernikeCoefficientsVsField' : ['IAS_',], 'IAS_ZernikeFringeCoefficients' : ['IAS_',], 'IAS_ZernikeStandardCoefficients' : ['IAS_',], # EncircledEnergy interface settings 'IAS_DiffractionEncircledEnergy' : ['IAS_',], 'IAS_ExtendedSourceEncircledEnergy' : ['IAS_',], 'IAS_GeometricEncircledEnergy' : ['IAS_',], 'IAS_GeometricLineEdgeSpread' : ['IAS_',], # Fans interface settings 'IAS_Fan' : ['IAS_',], # Mtf interface settings 'IAS_FftMtf' : ['IAS_',], 'IAS_FftMtfMap' : ['IAS_',], 'IAS_FftMtfvsField' : ['IAS_',], 'IAS_FftSurfaceMtf' : ['IAS_',], 'IAS_FftThroughFocusMtf' : ['IAS_',], 'IAS_GeometricMtf' : ['IAS_',], 'IAS_GeometricMtfMap' : ['IAS_',], 'IAS_GeometricMtfvsField' : ['IAS_',], 'IAS_GeometricThroughFocusMtf' : ['IAS_',], 'IAS_HuygensMtf' : ['IAS_',], 'IAS_HuygensMtfvsField' : ['IAS_',], 'IAS_HuygensSurfaceMtf' : ['IAS_',], 'IAS_HuygensThroughFocusMtf' : ['IAS_',], # Psf interface settings 'IAS_FftPsf' : ['IAS_',], 'IAS_FftPsfCrossSection' : ['IAS_',], 'IAS_FftPsfLineEdgeSpread' : ['IAS_',], 'IAS_HuygensPsf' : ['IAS_',], 'IAS_HuygensPsfCrossSection' : ['IAS_',], # RayTracing interface settings 'IAS_DetectorViewer' : ['IAS_',], # RMS interface settings 'IAS_RMSField' : ['IAS_',], 'IAS_RMSFieldMap' : ['IAS_',], 'IAS_RMSFocus' : ['IAS_',], 'IAS_RMSLambdaDiagram' : ['IAS_',], # Spot interface settings 'IAS_Spot' : ['IAS_',], # Surface interface settings 'IAS_SurfaceCurvature' : ['IAS_',], 'IAS_SurfaceCurvatureCross' : ['IAS_',], 'IAS_SurfacePhase' : ['IAS_',], 'IAS_SurfacePhaseCross' : ['IAS_',], 'IAS_SurfaceSag' : ['IAS_',], 'IAS_SurfaceSagCross' : ['IAS_',], # Wavefront interface settings 'IAS_Foucault' : ['IAS_',], ## IOpticalSystemTools Interface - base class for all system tools 'IBatchRayTrace' : ['ISystemTool',], 'IConvertToNSCGroup' : ['ISystemTool',], 'ICreateArchive' : ['ISystemTool',], 'IExportCAD' : ['ISystemTool',], 'IGlobalOptimization' : ['ISystemTool',], 'IHammerOptimization' : ['ISystemTool',], 'ILensCatalogs' : ['ISystemTool',], 'ILightningTrace' : ['ISystemTool',], 'ILocalOptimization' : ['ISystemTool',], 'IMFCalculator' : ['ISystemTool',], 'INSCRayTrace' : ['ISystemTool',], 'IQuickAdjust' : ['ISystemTool',], 'IQuickFocus' : ['ISystemTool',], 'IRestoreArchive' : ['ISystemTool',], 'IScale' : ['ISystemTool',], 'ITolerancing' : ['ISystemTool',], ## IWizard Interface - base interface for all wizards 'INSCWizard' : ['IWizard',], 'INSCBitmapWizard' : ['INSCWizard', 'IWizard',], 'INSCOptimizationWizard' : ['INSCWizard', 'IWizard',], 'INSCRoadwayLightingWizard' : ['IWizard',], 'IToleranceWizard' : ['IWizard',], 'INSCToleranceWizard': ['IToleranceWizard', 'IWizard',], 'ISEQToleranceWizard' : ['IToleranceWizard', 'IWizard',], 'ISEQOptimizationWizard' : ['IWizard',], } # Ensure Rule #2 of 
inheritance_dict. for each in inheritance_dict.values(): assert len(each), 'Empty base class list not allowed in inheritance_dict'
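
#%% Illustrative sketch (not part of the pyzos API): how ZOSPropMapper routes a
# property access through the dispatcher attribute. The _FakeZosObj class below
# is a hypothetical stand-in for a real ZOS COM object, which is only available
# inside a Zemax OpticStudio installation; plain Python attribute values pass
# through wrapped_zos_object() unchanged because they expose no CLSID.

class _FakeZosObj(object):
    """Hypothetical stand-in mimicking a COM object with a single property."""
    def __init__(self):
        self.SystemName = 'Lens_01'

class _FakeWrapper(object):
    """Minimal hand-written wrapper; managed_wrapper_class_factory() builds the
    equivalent class dynamically for every property of a real ZOS object."""
    pSystemName = ZOSPropMapper('_ifake', 'SystemName', setter=True)

    def __init__(self, zos_obj):
        self.__dict__['_ifake'] = zos_obj

# Expected behavior of the sketch:
#   w = _FakeWrapper(_FakeZosObj())
#   w.pSystemName              # --> 'Lens_01' (read routed through the descriptor)
#   w.pSystemName = 'Lens_02'  # write routed through the descriptor (setter=True)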
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0

import errno
import grp
import logging
import os
import shutil
import signal
import stat
import sys
import tempfile
import time

from benchexec import systeminfo
from benchexec import util

__all__ = [
    "find_my_cgroups",
    "BLKIO",
    "CPUACCT",
    "CPUSET",
    "FREEZER",
    "MEMORY",
]

CGROUP_FALLBACK_PATH = "system.slice/benchexec-cgroup.service"
"""If we do not have write access to the current cgroup,
attempt to use this cgroup as fallback."""

CGROUP_NAME_PREFIX = "benchmark_"

BLKIO = "blkio"
CPUACCT = "cpuacct"
CPUSET = "cpuset"
FREEZER = "freezer"
MEMORY = "memory"
ALL_KNOWN_SUBSYSTEMS = {
    # cgroups for BenchExec
    BLKIO,
    CPUACCT,
    CPUSET,
    FREEZER,
    MEMORY,
    # other cgroups users might want
    "cpu",
    "devices",
    "net_cls",
    "net_prio",
    "hugetlb",
    "perf_event",
    "pids",
}

_PERMISSION_HINT_GROUPS = """
You need to add your account to the following groups: {0}
Remember to logout and login again afterwards to make group changes effective."""

_PERMISSION_HINT_DEBIAN = """
The recommended way to fix this is to install the Debian package for BenchExec
and add your account to the group "benchexec":
https://github.com/sosy-lab/benchexec/blob/master/doc/INSTALL.md#debianubuntu
Alternatively, you can install benchexec-cgroup.service manually:
https://github.com/sosy-lab/benchexec/blob/master/doc/INSTALL.md#setting-up-cgroups-on-machines-with-systemd"""

_PERMISSION_HINT_SYSTEMD = """
The recommended way to fix this is to add your account to a group named "benchexec"
and install benchexec-cgroup.service:
https://github.com/sosy-lab/benchexec/blob/master/doc/INSTALL.md#setting-up-cgroups-on-machines-with-systemd"""

_PERMISSION_HINT_OTHER = """
Please configure your system in a way to allow your user to use cgroups:
https://github.com/sosy-lab/benchexec/blob/master/doc/INSTALL.md#setting-up-cgroups-on-machines-without-systemd"""

_ERROR_MSG_PERMISSIONS = """
Required cgroups are not available because of missing permissions.{0}

As a temporary workaround, you can also run
"sudo chmod o+wt {1}"
Note that this will grant permissions to more users than typically desired
and it will only last until the next reboot."""

_ERROR_MSG_OTHER = """
Required cgroups are not available.
If you are using BenchExec within a container, please make "/sys/fs/cgroup" available."""


def find_my_cgroups(cgroup_paths=None, fallback=True):
    """
    Return a Cgroup object with the cgroups of the current process.
    Note that it is not guaranteed that all subsystems are available
    in the returned object, as a subsystem may not be mounted.
    Check with "subsystem in <instance>" before using.
    A subsystem may also be present but we do not have the rights to create
    child cgroups, this can be checked with require_subsystem().
    @param cgroup_paths: If given, use this instead of reading /proc/self/cgroup.
    @param fallback: Whether to look for a default cgroup as fallback if our
        cgroup is not accessible.
    """
    logging.debug(
        "Analyzing /proc/mounts and /proc/self/cgroup for determining cgroups."
    )
    if cgroup_paths is None:
        my_cgroups = dict(_find_own_cgroups())
    else:
        my_cgroups = dict(_parse_proc_pid_cgroup(cgroup_paths))

    cgroupsParents = {}
    for subsystem, mount in _find_cgroup_mounts():
        # Ignore mount points where we do not have any access,
        # e.g. because a parent directory has insufficient permissions
        # (lxcfs mounts cgroups under /run/lxcfs in such a way).
        if os.access(mount, os.F_OK):
            cgroupPath = os.path.join(mount, my_cgroups[subsystem])
            fallbackPath = os.path.join(mount, CGROUP_FALLBACK_PATH)
            if (
                fallback
                and not os.access(cgroupPath, os.W_OK)
                and os.path.isdir(fallbackPath)
            ):
                cgroupPath = fallbackPath
            cgroupsParents[subsystem] = cgroupPath

    return Cgroup(cgroupsParents)


def _find_cgroup_mounts():
    """
    Return the information which subsystems are mounted where.
    @return a generator of tuples (subsystem, mountpoint)
    """
    try:
        with open("/proc/mounts", "rt") as mountsFile:
            for mount in mountsFile:
                mount = mount.split(" ")
                if mount[2] == "cgroup":
                    mountpoint = mount[1]
                    options = mount[3]
                    for option in options.split(","):
                        if option in ALL_KNOWN_SUBSYSTEMS:
                            yield (option, mountpoint)
    except OSError:
        logging.exception("Cannot read /proc/mounts")


def _find_own_cgroups():
    """
    For all subsystems, return the information in which (sub-)cgroup this process is in.
    (Each process is in exactly one cgroup in each hierarchy.)
    @return a generator of tuples (subsystem, cgroup)
    """
    try:
        with open("/proc/self/cgroup", "rt") as ownCgroupsFile:
            for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile):
                yield cgroup
    except OSError:
        logging.exception("Cannot read /proc/self/cgroup")


def _parse_proc_pid_cgroup(content):
    """
    Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
    @param content: An iterable over the lines of the file.
    @return: a generator of tuples
    """
    for ownCgroup in content:
        # each line is "id:subsystem,subsystem:path"
        ownCgroup = ownCgroup.strip().split(":")
        try:
            path = ownCgroup[2][1:]  # remove leading /
        except IndexError:
            raise IndexError(f"index out of range for {ownCgroup}")
        for subsystem in ownCgroup[1].split(","):
            yield (subsystem, path)


def kill_all_tasks_in_cgroup(cgroup, ensure_empty=True):
    tasksFile = os.path.join(cgroup, "tasks")

    i = 0
    while True:
        i += 1
        # TODO We can probably remove this loop over signals and just send
        # SIGKILL. We added this loop when killing sub-processes was not reliable
        # and we did not know why, but now it is reliable.
        for sig in [signal.SIGKILL, signal.SIGINT, signal.SIGTERM]:
            with open(tasksFile, "rt") as tasks:
                task = None
                for task in tasks:
                    task = task.strip()
                    if i > 1:
                        logging.warning(
                            "Run has left-over process with pid %s "
                            "in cgroup %s, sending signal %s (try %s).",
                            task,
                            cgroup,
                            sig,
                            i,
                        )
                    util.kill_process(int(task), sig)

                if task is None or not ensure_empty:
                    return  # No process was hanging, exit
            # wait for the process to exit, this might take some time
            time.sleep(i * 0.5)


def remove_cgroup(cgroup):
    if not os.path.exists(cgroup):
        logging.warning("Cannot remove CGroup %s, because it does not exist.", cgroup)
        return
    assert os.path.getsize(os.path.join(cgroup, "tasks")) == 0
    try:
        os.rmdir(cgroup)
    except OSError:
        # sometimes this fails because the cgroup is still busy, we try again once
        try:
            os.rmdir(cgroup)
        except OSError as e:
            logging.warning(
                "Failed to remove cgroup %s: error %s (%s)", cgroup, e.errno, e.strerror
            )


def _register_process_with_cgrulesengd(pid):
    """Tell cgrulesengd daemon to not move the given process into other cgroups,
    if libcgroup is available.
    """
    # Logging/printing from inside preexec_fn would end up in the output file,
    # not in the correct logger, thus it is disabled here.
from ctypes import cdll try: libcgroup = cdll.LoadLibrary("libcgroup.so.1") failure = libcgroup.cgroup_init() if failure: pass else: CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1 failure = libcgroup.cgroup_register_unchanged_process( pid, CGROUP_DAEMON_UNCHANGE_CHILDREN ) if failure: pass # print(f'Could not register process to cgrulesndg, error {success}. ' # 'Probably the daemon will mess up our cgroups.') except OSError: pass class Cgroup(object): def __init__(self, cgroupsPerSubsystem): assert set(cgroupsPerSubsystem.keys()) <= ALL_KNOWN_SUBSYSTEMS assert all(cgroupsPerSubsystem.values()) # Also update self.paths on every update to this! self.per_subsystem = cgroupsPerSubsystem self.paths = set(cgroupsPerSubsystem.values()) # without duplicates # for error messages: self.unusable_subsystems = set() self.denied_subsystems = {} def __contains__(self, key): return key in self.per_subsystem def __getitem__(self, key): return self.per_subsystem[key] def __str__(self): return str(self.paths) def require_subsystem(self, subsystem, log_method=logging.warning): """ Check whether the given subsystem is enabled and is writable (i.e., new cgroups can be created for it). Produces a log message for the user if one of the conditions is not fulfilled. If the subsystem is enabled but not writable, it will be removed from this instance such that further checks with "in" will return "False". @return A boolean value. """ if subsystem not in self: if subsystem not in self.unusable_subsystems: self.unusable_subsystems.add(subsystem) log_method( "Cgroup subsystem %s is not available. " "Please make sure it is supported by your kernel and mounted.", subsystem, ) return False try: test_cgroup = self.create_fresh_child_cgroup(subsystem) test_cgroup.remove() except OSError as e: log_method( "Cannot use cgroup %s for subsystem %s, reason: %s (%s).", self.per_subsystem[subsystem], subsystem, e.strerror, e.errno, ) self.unusable_subsystems.add(subsystem) if e.errno == errno.EACCES: self.denied_subsystems[subsystem] = self.per_subsystem[subsystem] del self.per_subsystem[subsystem] self.paths = set(self.per_subsystem.values()) return False return True def handle_errors(self, critical_cgroups): """ If there were errors in calls to require_subsystem() and critical_cgroups is not empty, terminate the program with an error message that explains how to fix the problem. @param critical_cgroups: set of unusable but required cgroups """ if not critical_cgroups: return assert critical_cgroups.issubset(self.unusable_subsystems) if critical_cgroups.issubset(self.denied_subsystems): # All errors were because of permissions for these directories paths = sorted(set(self.denied_subsystems.values())) # Check if all cgroups have group permissions and user could just be added # to some groups to get access. But group 0 (root) of course does not count. 
groups = {} try: if all(stat.S_IWGRP & os.stat(path).st_mode for path in paths): groups = {os.stat(path).st_gid for path in paths} except OSError: pass if groups and 0 not in groups: def get_group_name(gid): try: name = grp.getgrgid(gid).gr_name except KeyError: name = None return util.escape_string_shell(name or str(gid)) groups = " ".join(sorted(set(map(get_group_name, groups)))) permission_hint = _PERMISSION_HINT_GROUPS.format(groups) elif systeminfo.has_systemd(): if systeminfo.is_debian(): permission_hint = _PERMISSION_HINT_DEBIAN else: permission_hint = _PERMISSION_HINT_SYSTEMD else: permission_hint = _PERMISSION_HINT_OTHER paths = " ".join(map(util.escape_string_shell, paths)) sys.exit(_ERROR_MSG_PERMISSIONS.format(permission_hint, paths)) else: sys.exit(_ERROR_MSG_OTHER) # e.g., subsystem not mounted def create_fresh_child_cgroup(self, *subsystems): """ Create child cgroups of the current cgroup for at least the given subsystems. @return: A Cgroup instance representing the new child cgroup(s). """ assert set(subsystems).issubset(self.per_subsystem.keys()) createdCgroupsPerSubsystem = {} createdCgroupsPerParent = {} for subsystem in subsystems: parentCgroup = self.per_subsystem[subsystem] if parentCgroup in createdCgroupsPerParent: # reuse already created cgroup createdCgroupsPerSubsystem[subsystem] = createdCgroupsPerParent[ parentCgroup ] continue cgroup = tempfile.mkdtemp(prefix=CGROUP_NAME_PREFIX, dir=parentCgroup) createdCgroupsPerSubsystem[subsystem] = cgroup createdCgroupsPerParent[parentCgroup] = cgroup # add allowed cpus and memory to cgroup if necessary # (otherwise we can't add any tasks) def copy_parent_to_child(name): shutil.copyfile( os.path.join(parentCgroup, name), os.path.join(cgroup, name) ) try: copy_parent_to_child("cpuset.cpus") copy_parent_to_child("cpuset.mems") except OSError: # expected to fail if cpuset subsystem is not enabled in this hierarchy pass return Cgroup(createdCgroupsPerSubsystem) def add_task(self, pid): """ Add a process to the cgroups represented by this instance. """ _register_process_with_cgrulesengd(pid) for cgroup in self.paths: with open(os.path.join(cgroup, "tasks"), "w") as tasksFile: tasksFile.write(str(pid)) def get_all_tasks(self, subsystem): """ Return a generator of all PIDs currently in this cgroup for the given subsystem. """ with open( os.path.join(self.per_subsystem[subsystem], "tasks"), "r" ) as tasksFile: for line in tasksFile: yield int(line) def kill_all_tasks(self): """ Kill all tasks in this cgroup and all its children cgroups forcefully. Additionally, the children cgroups will be deleted. """ def kill_all_tasks_in_cgroup_recursively(cgroup, delete): for dirpath, dirs, _files in os.walk(cgroup, topdown=False): for subCgroup in dirs: subCgroup = os.path.join(dirpath, subCgroup) kill_all_tasks_in_cgroup(subCgroup, ensure_empty=delete) if delete: remove_cgroup(subCgroup) kill_all_tasks_in_cgroup(cgroup, ensure_empty=delete) # First, we go through all cgroups recursively while they are frozen and kill # all processes. This helps against fork bombs and prevents processes from # creating new subgroups while we are trying to kill everything. # But this is only possible if we have freezer, and all processes will stay # until they are thawed (so we cannot check for cgroup emptiness and we cannot # delete subgroups). 
if FREEZER in self.per_subsystem: cgroup = self.per_subsystem[FREEZER] freezer_file = os.path.join(cgroup, "freezer.state") util.write_file("FROZEN", freezer_file) kill_all_tasks_in_cgroup_recursively(cgroup, delete=False) util.write_file("THAWED", freezer_file) # Second, we go through all cgroups again, kill what is left, # check for emptiness, and remove subgroups. # Furthermore, we do this for all hierarchies, not only the one with freezer. for cgroup in self.paths: kill_all_tasks_in_cgroup_recursively(cgroup, delete=True) def has_value(self, subsystem, option): """ Check whether the given value exists in the given subsystem. Does not make a difference whether the value is readable, writable, or both. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available. """ assert subsystem in self return os.path.isfile( os.path.join(self.per_subsystem[subsystem], f"{subsystem}.{option}") ) def get_value(self, subsystem, option): """ Read the given value from the given subsystem. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available. """ assert subsystem in self, f"Subsystem {subsystem} is missing" return util.read_file(self.per_subsystem[subsystem], f"{subsystem}.{option}") def get_file_lines(self, subsystem, option): """ Read the lines of the given file from the given subsystem. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available. """ assert subsystem in self with open( os.path.join(self.per_subsystem[subsystem], f"{subsystem}.{option}") ) as f: for line in f: yield line def get_key_value_pairs(self, subsystem, filename): """ Read the lines of the given file from the given subsystem and split the lines into key-value pairs. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available. """ assert subsystem in self return util.read_key_value_pairs_from_file( self.per_subsystem[subsystem], f"{subsystem}.{filename}" ) def set_value(self, subsystem, option, value): """ Write the given value for the given subsystem. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available. """ assert subsystem in self util.write_file( str(value), self.per_subsystem[subsystem], f"{subsystem}.{option}" ) def remove(self): """ Remove all cgroups this instance represents from the system. This instance is afterwards not usable anymore! """ for cgroup in self.paths: remove_cgroup(cgroup) del self.paths del self.per_subsystem def read_cputime(self): """ Read the cputime usage of this cgroup. CPUACCT cgroup needs to be available. @return cputime usage in seconds """ # convert nano-seconds to seconds return float(self.get_value(CPUACCT, "usage")) / 1_000_000_000 def read_allowed_memory_banks(self): """Get the list of all memory banks allowed by this cgroup.""" return util.parse_int_list(self.get_value(CPUSET, "mems"))
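
# Usage sketch (illustration only, not part of BenchExec's public setup code):
# the typical life cycle of a Cgroup when measuring a child process with the
# API above. Running it requires a mounted cgroup v1 hierarchy with suitable
# permissions, so treat it as documentation rather than as a test.
def _example_cgroup_usage():
    import subprocess

    my_cgroups = find_my_cgroups()
    # Bail out if we cannot create child cgroups for the cpuacct controller.
    if not my_cgroups.require_subsystem(CPUACCT):
        return
    benchmark_cgroup = my_cgroups.create_fresh_child_cgroup(CPUACCT)
    proc = subprocess.Popen(["sleep", "1"])
    # Move the child into the fresh cgroup so its cpu time is accounted there.
    benchmark_cgroup.add_task(proc.pid)
    proc.wait()
    print("cputime: {}s".format(benchmark_cgroup.read_cputime()))
    # The cgroup must be empty (the child has exited) before it can be removed.
    benchmark_cgroup.remove()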
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class DialogueList(ListResource): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, assistant_sid): """ Initialize the DialogueList :param Version version: Version that contains the resource :param assistant_sid: The SID of the Assistant that is the parent of the resource :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList """ super(DialogueList, self).__init__(version) # Path Solution self._solution = {'assistant_sid': assistant_sid, } def get(self, sid): """ Constructs a DialogueContext :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext """ return DialogueContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a DialogueContext :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext """ return DialogueContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Autopilot.V1.DialogueList>' class DialoguePage(Page): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, response, solution): """ Initialize the DialoguePage :param Version version: Version that contains the resource :param Response response: Response from the API :param assistant_sid: The SID of the Assistant that is the parent of the resource :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialoguePage :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialoguePage """ super(DialoguePage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of DialogueInstance :param dict payload: Payload response from the API :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance """ return DialogueInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Autopilot.V1.DialoguePage>' class DialogueContext(InstanceContext): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. 
""" def __init__(self, version, assistant_sid, sid): """ Initialize the DialogueContext :param Version version: Version that contains the resource :param assistant_sid: The SID of the Assistant that is the parent of the resource to fetch :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext """ super(DialogueContext, self).__init__(version) # Path Solution self._solution = {'assistant_sid': assistant_sid, 'sid': sid, } self._uri = '/Assistants/{assistant_sid}/Dialogues/{sid}'.format(**self._solution) def fetch(self): """ Fetch the DialogueInstance :returns: The fetched DialogueInstance :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance """ payload = self._version.fetch(method='GET', uri=self._uri, ) return DialogueInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], sid=self._solution['sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Autopilot.V1.DialogueContext {}>'.format(context) class DialogueInstance(InstanceResource): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, payload, assistant_sid, sid=None): """ Initialize the DialogueInstance :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance """ super(DialogueInstance, self).__init__(version) # Marshaled Properties self._properties = { 'account_sid': payload.get('account_sid'), 'assistant_sid': payload.get('assistant_sid'), 'sid': payload.get('sid'), 'data': payload.get('data'), 'url': payload.get('url'), } # Context self._context = None self._solution = {'assistant_sid': assistant_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: DialogueContext for this DialogueInstance :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueContext """ if self._context is None: self._context = DialogueContext( self._version, assistant_sid=self._solution['assistant_sid'], sid=self._solution['sid'], ) return self._context @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def assistant_sid(self): """ :returns: The SID of the Assistant that is the parent of the resource :rtype: unicode """ return self._properties['assistant_sid'] @property def sid(self): """ :returns: The unique string that identifies the resource :rtype: unicode """ return self._properties['sid'] @property def data(self): """ :returns: The JSON string that describes the dialogue session object :rtype: dict """ return self._properties['data'] @property def url(self): """ :returns: The absolute URL of the Dialogue resource :rtype: unicode """ return self._properties['url'] def fetch(self): """ Fetch the DialogueInstance :returns: The fetched DialogueInstance :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance """ return self._proxy.fetch() def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Autopilot.V1.DialogueInstance {}>'.format(context)
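
# Usage sketch (hand-written, not generated code): fetching a Dialogue through
# the standard twilio-python client. The SIDs and auth token below are
# placeholders, not real resources; kept commented out because it needs live
# credentials.
#
# from twilio.rest import Client
#
# client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
# dialogue = client.autopilot \
#                  .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                  .dialogues('UKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                  .fetch()
# print(dialogue.data)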
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main program controller for BomValidate This program will: (1) Deploy Spinnaker as specified via command-line (2) Configure Spinnaker as specified via command-line (3) Run the test suite or some subset there-of, as specified via command-line (4) Collect and report on the results (5) Tear down the deployment Sample usage: ./validate_bom.sh \ --deploy_hal_platform=gce \ --deploy_spinnaker_type=localdebian \ --deploy_google_project=$PROJECT \ --deploy_google_instance=$INSTANCE \ --spinnaker_storage=gcs \ --storage_gcs_bucket=$BUCKET \ --google_account_credentials=$GOOGLE_CREDENTIAL_PATH \ --google_account_project=$PROJECT //dev/validate_bom.sh \ --deploy_google_project=$PROJECT \ --deploy_google_instance=$INSTANCE \ --spinnaker_storage=gcs \ --storage_gcs_bucket=$BUCKET \ --storage_gcs_credentials=$GOOGLE_CREDENTIAL_PATH \ --google_account_credentials=$GOOGLE_CREDENTIAL_PATH \ --google_account_project=$PROJECT \ --k8s_account_credentials=$HOME/.kube/config \ --k8s_account_docker_account=my-docker-account \ --docker_account_address=https://index.docker.io \ --docker_account_repositories=library/nginx \ --deploy_hal_platform=gce \ --deploy_spinnaker_type=distributed \ --deploy_k8s_namespace=spinnaker \ --test_include=(kube|front50) \ --deploy_undeploy=false \ --deploy_deploy=false """ import argparse import logging import os import sys import yaml from buildtool.__main__ import ( STANDARD_LOG_LEVELS, preprocess_args, add_standard_parser_args) from buildtool.metrics import MetricsManager from buildtool import ( add_parser_argument, run_subprocess) import validate_bom__config import validate_bom__deploy import validate_bom__test def build_report(test_controller): """Report on the test results.""" options = test_controller.options citest_log_dir = os.path.join(options.log_dir, 'citest_logs') if not os.path.exists(citest_log_dir): logging.warning('%s does not exist -- no citest logs.', citest_log_dir) return None retcode, stdout = run_subprocess( 'python -m citest.reporting.generate_html_report --index *.journal', shell=True, cwd=citest_log_dir) if retcode != 0: logging.error('Error building report: %s', stdout) logging.info('Logging information is in %s', options.log_dir) return test_controller.build_summary() def get_options(args): """Resolve all the command-line options.""" args, defaults = preprocess_args( args, default_home_path_filename='validate_bom.yml') parser = argparse.ArgumentParser(prog='validate_bom.sh') add_standard_parser_args(parser, defaults) # DEPRECATED - use output_dir instead add_parser_argument(parser, 'log_dir', defaults, './validate_bom_results', help='Path to root directory for report output.') MetricsManager.init_argument_parser(parser, defaults) validate_bom__config.init_argument_parser(parser, defaults) validate_bom__deploy.init_argument_parser(parser, defaults) validate_bom__test.init_argument_parser(parser, defaults) options = parser.parse_args(args) options.program = 
'validate_bom' options.command = 'validate_bom' # metrics assumes a "command" value. options.log_dir = options.output_dir # deprecated validate_bom__config.validate_options(options) validate_bom__test.validate_options(options) if not os.path.exists(options.log_dir): os.makedirs(options.log_dir) if options.influxdb_database == 'SpinnakerBuildTool': options.influxdb_database = 'SpinnakerValidate' # Add platform/spinnaker_type to each metric we produce. # We'll use this to distinguish what was being tested. context_labels = 'platform=%s,deployment_type=%s' % ( validate_bom__deploy.determine_deployment_platform(options), options.deploy_spinnaker_type) latest_unvalidated_suffix = '-latest-unvalidated' if options.deploy_version.endswith(latest_unvalidated_suffix): bom_series = options.deploy_version[:-len(latest_unvalidated_suffix)] else: bom_series = options.deploy_version[:options.deploy_version.rfind('-')] context_labels += ',version=%s' % bom_series if options.monitoring_context_labels: context_labels += ',' + options.monitoring_context_labels options.monitoring_context_labels = context_labels return options def main(options, metrics): """The main controller.""" outcome_success = False deployer = validate_bom__deploy.make_deployer(options, metrics) test_controller = validate_bom__test.ValidateBomTestController(deployer) if options.deploy_deploy: validate_bom__config.setup_environment(options) init_script, config_script = validate_bom__config.make_scripts(options) file_set = validate_bom__config.get_files_to_upload(options) try: deployer.deploy(init_script, config_script, file_set) _, failed, _ = test_controller.run_tests() outcome_success = not failed finally: if sys.exc_info()[0] is not None: logging.error('Caught Exception') logging.exception('Caught Exception') if options.deploy_undeploy or options.deploy_always_collect_logs: deployer.collect_logs() if options.deploy_undeploy: try: deployer.undeploy() finally: validate_bom__config.teardown_environment(options) else: logging.info('Skipping undeploy because --deploy_undeploy=false') summary = build_report(test_controller) if summary: print(summary) if options.testing_enabled or not outcome_success: # Only record the outcome if we were testing # or if we failed [to deploy/undeploy]. metrics.inc_counter('ValidationControllerOutcome', {'success': outcome_success}) logging.info('Exiting with code=%d', test_controller.exit_code) return test_controller.exit_code def wrapped_main(): options = get_options(sys.argv[1:]) logging.basicConfig( format='%(levelname).1s %(asctime)s.%(msecs)03d' ' [%(threadName)s.%(process)d] %(message)s', datefmt='%H:%M:%S', level=STANDARD_LOG_LEVELS[options.log_level]) logging.debug( 'Running with options:\n %s', '\n '.join(yaml.safe_dump(vars(options), default_flow_style=False) .split('\n'))) metrics = MetricsManager.startup_metrics(options) try: return main(options, metrics) finally: MetricsManager.shutdown_metrics() if __name__ == '__main__': sys.exit(wrapped_main())
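
# Worked example (illustration only) of the bom_series derivation performed in
# get_options() above: a '-latest-unvalidated' suffix is stripped wholesale,
# otherwise everything from the last '-' onward is dropped. The version strings
# in the comments are made up for illustration.
def _example_bom_series(deploy_version):
  """Mirrors the series-extraction logic in get_options()."""
  suffix = '-latest-unvalidated'
  if deploy_version.endswith(suffix):
    return deploy_version[:-len(suffix)]
  return deploy_version[:deploy_version.rfind('-')]

# _example_bom_series('1.8.0-latest-unvalidated')  # --> '1.8.0'
# _example_bom_series('1.8.0-20180402080808')      # --> '1.8.0'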
#!/usr/bin/python ''' The MIT License (MIT) Copyright (c) 2016 Charles Lin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' #Main method run script for mycn code #See README for additional information on downloading and installing dependencies #Requires linlab pipeline set of utilities # #Requires bamliquidator # #Requires #========================================================================== #=============================DEPENDENCIES================================= #========================================================================== import sys, os # Get the script's full local path whereAmI = os.path.dirname(os.path.realpath(__file__)) pipeline_dir = '/storage/cylin/home/cl6/src/pipeline/' sys.path.append(whereAmI) sys.path.append(pipeline_dir) import pipeline_dfci import utils import string import numpy import os import re from collections import defaultdict #========================================================================== #============================PARAMETERS==================================== #========================================================================== projectName = 'mycn' genome ='hg19' annotFile = '%s/annotation/%s_refseq.ucsc' % (pipeline_dir,genome) #project folders projectFolder = '/storage/cylin/grail/projects/mycn_resub/%s/' % (projectName) #PATH TO YOUR PROJECT FOLDER #standard folder names gffFolder ='%sgff/' % (projectFolder) macsFolder = '%smacsFolder/' % (projectFolder) macsEnrichedFolder = '%smacsEnriched/' % (projectFolder) mappedEnrichedFolder = '%smappedEnriched/' % (projectFolder) mappedFolder = '%smappedFolder/' % (projectFolder) wiggleFolder = '%swiggles/' % (projectFolder) metaFolder = '%smeta/' % (projectFolder) metaRoseFolder = '%smeta_rose/' % (projectFolder) fastaFolder = '%sfasta/' % (projectFolder) bedFolder = '%sbed/' % (projectFolder) figureCodeFolder = '%sfigureCode/' % (projectFolder) figuresFolder = '%sfigures/' % (projectFolder) geneListFolder = '%sgeneListFolder/' % (projectFolder) bedFolder = '%sbeds/' % (projectFolder) signalFolder = '%ssignalTables/' % (projectFolder) tableFolder = '%stables/' % (projectFolder) #mask Files maskFile ='%smasks/hg19_encode_blacklist.bed' % (projectFolder) #genomeDirectory genomeDirectory = '/grail/genomes/Homo_sapiens/UCSC/hg19/Sequence/Chromosomes/' #making folders folderList = [gffFolder,macsFolder,macsEnrichedFolder,mappedEnrichedFolder,mappedFolder,wiggleFolder,metaFolder,metaRoseFolder,fastaFolder,figureCodeFolder,figuresFolder,geneListFolder,bedFolder,signalFolder,tableFolder] for folder in folderList: 
pipeline_dfci.formatFolder(folder,True) #========================================================================== #============================LIST OF DATAFILES============================= #========================================================================== #this project will utilize multiple datatables #data tables are organized largely by type/system #some data tables overlap for ease of analysis #ATAC-Seq atac_dataFile = '%sdata_tables/ATAC_TABLE.txt' % (projectFolder) #ChIP-Seq be2c_dataFile = '%sdata_tables/BE2C_TABLE.txt' % (projectFolder) mm1s_dataFile = '%sdata_tables/MM1S_TABLE.txt' % (projectFolder) u87_dataFile = '%sdata_tables/U87_TABLE.txt' % (projectFolder) nb_all_chip_dataFile = '%sdata_tables/NB_ALL.txt' % (projectFolder) p4936_young_dataFile = '%sdata_tables/P493-6_YOUNG_TABLE.txt' % (projectFolder) sclc_dataFile = '%sdata_tables/SCLC_DATA_TABLE.txt' % (projectFolder) shep21_dataFile = '%sdata_tables/SHEP21_TABLE.txt' % (projectFolder) shep_on_dataFile = '%sdata_tables/SHEP_ON_TABLE.txt' % (projectFolder) chip_data_list = [be2c_dataFile,mm1s_dataFile,nb_all_chip_dataFile,p4936_young_dataFile,sclc_dataFile,shep21_dataFile,shep_on_dataFile,u87_dataFile] #note: all mouse analysis of THMYCN tumors are in a separate script #CHIP-RX shep21_chiprx_dataFile = '%sdata_tables/SHEP21_CHIPRX_TABLE.txt' % (projectFolder) #RNA-Seq be2c_rna_drug_dataFile = '%sdata_tables/BE2C_RNA_DRUG_TABLE.txt' % (projectFolder) be2c_rna_twist_dataFile = '%sdata_tables/BE2C_RNA_TWIST_TABLE.txt' % (projectFolder) shep21_rna_dataFile = '%sdata_tables/SHEP21_DOX_RNA_TABLE.txt' % (projectFolder) rna_data_list = [be2c_rna_drug_dataFile,be2c_rna_twist_dataFile,shep21_rna_dataFile] all_data_list = [atac_dataFile,be2c_dataFile,mm1s_dataFile,nb_all_chip_dataFile,p4936_young_dataFile,sclc_dataFile,shep21_dataFile,shep_on_dataFile,u87_dataFile,shep21_chiprx_dataFile,be2c_rna_drug_dataFile,be2c_rna_twist_dataFile,shep21_rna_dataFile] #========================================================================== #===========================MAIN METHOD==================================== #========================================================================== def main(): print('main analysis for MYCN project') print('changing directory to project folder') os.chdir(projectFolder) print('\n\n') print('#======================================================================') print('#======================I. CHECKING CHIP-SEQ DATA=======================') print('#======================================================================') print('\n\n') #This section sanity checks each data table and makes sure both bam and .bai files are accessible #for ChIP-Seq #edit all of the data files to absolute path the for dataFile in chip_data_list: pipeline_dfci.summary(dataFile) print('\n\n') print('#======================================================================') print('#======================II. CHECKING RNA-SEQ DATA=======================') print('#======================================================================') print('\n\n') #This section sanity checks each data table and makes sure both bam and .bai files are accessible #for RNA-Seq #edit all of the data files to absolute path the for dataFile in rna_data_list: pipeline_dfci.summary(dataFile) print('\n\n') print('#======================================================================') print('#====================III. 
CHECKING ATAC-SEQ DATA=======================')
    print('#======================================================================')
    print('\n\n')

    #This section sanity checks each data table and makes sure both bam and .bai files are accessible
    #for ATAC-Seq
    #edit all of the data files to absolute path the

    pipeline_dfci.summary(atac_dataFile)

    print('\n\n')
    print('#======================================================================')
    print('#======================IV. CHECKING CHIPRX DATA========================')
    print('#======================================================================')
    print('\n\n')

    pipeline_dfci.summary(shep21_chiprx_dataFile)


#==========================================================================
#==================================THE END=================================
#==========================================================================


if __name__=="__main__":
    main()
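
#illustrative helper (not part of pipeline_dfci): a minimal sketch of the kind
#of bam/.bai accessibility check described in the comments above; the real
#check is performed by pipeline_dfci.summary() on each data table. The
#bamFileList argument is assumed to hold absolute bam paths, with indexes
#assumed to sit next to each bam as <name>.bam.bai
def checkBamFiles(bamFileList):

    '''
    returns the list of bam/.bai paths that are missing or unreadable
    '''
    missing = []
    for bamFile in bamFileList:
        for path in [bamFile, bamFile + '.bai']:
            if not os.access(path, os.R_OK):
                missing.append(path)
    return missing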
import operator import unittest2 from pykafka import protocol from pykafka.common import CompressionType from pykafka.utils.compat import buffer class TestMetadataAPI(unittest2.TestCase): maxDiff = None def test_request(self): req = protocol.MetadataRequest() msg = req.get_bytes() self.assertEqual( msg, bytearray(b'\x00\x00\x00\x15\x00\x03\x00\x00\x00\x00\x00\x00\x00\x07pykafka\x00\x00\x00\x00') ) def test_response(self): cluster = protocol.MetadataResponse( buffer(b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x09localhost\x00\x00#\x84\x00\x00\x00\x01\x00\x00\x00\x04test\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00') ) self.assertEqual(cluster.brokers[0].host, b'localhost') self.assertEqual(cluster.brokers[0].port, 9092) self.assertEqual(cluster.topics[b'test'].partitions[0].leader, cluster.brokers[0].id) self.assertEqual(cluster.topics[b'test'].partitions[0].replicas, [cluster.brokers[0].id]) self.assertEqual(cluster.topics[b'test'].partitions[0].isr, [cluster.brokers[0].id]) def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.MetadataResponse( buffer(b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x09localhost\x00\x00#\x84\x00\x00\x00\x01\x00\x00\x00\x04test\x00\x00\x00\x02\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00') ) self.assertEqual(response.topics[b'test'].partitions[0].err, 3) def test_topic_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.MetadataResponse( buffer(b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x09localhost\x00\x00#\x84\x00\x00\x00\x01\x00\x03\x00\x04test\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00') ) self.assertEqual(response.topics[b'test'].err, 3) class TestProduceAPI(unittest2.TestCase): maxDiff = None test_messages = [ protocol.Message(b'this is a test message', partition_key=b'asdf'), protocol.Message(b'this is also a test message', partition_key=b'test_key'), protocol.Message(b"this doesn't have a partition key"), ] def test_request(self): message = self.test_messages[0] req = protocol.ProduceRequest() req.add_message(message, b'test', 0) msg = req.get_bytes() self.assertEqual( msg, bytearray(b"\x00\x00\x00a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07pykafka\x00\x01\x00\x00\'\x10\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x004\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00(\x0e\x8a\x19O\x00\x00\x00\x00\x00\x04asdf\x00\x00\x00\x16this is a test message") ) def test_gzip_compression(self): req = protocol.ProduceRequest(compression_type=CompressionType.GZIP) [req.add_message(m, b'test_gzip', 0) for m in self.test_messages] msg = req.get_bytes() self.assertEqual(len(msg), 207) # this isn't a good test def test_snappy_compression(self): req = protocol.ProduceRequest(compression_type=CompressionType.SNAPPY) [req.add_message(m, b'test_snappy', 0) for m in self.test_messages] msg = req.get_bytes() self.assertEqual(len(msg), 212) # this isn't a good test def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = 
protocol.ProduceResponse( buffer(b'\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x02') ) self.assertEqual(response.topics[b'test'][0].err, 3) def test_response(self): response = protocol.ProduceResponse( buffer(b'\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02') ) self.assertEqual( response.topics, {b'test': {0: protocol.ProducePartitionResponse(0, 2)}} ) class TestFetchAPI(unittest2.TestCase): maxDiff = None expected_data = [ { 'partition_key': b'asdf', 'compression_type': 0, 'value': b'this is a test message', 'offset': 0, 'partition_id': 0, 'produce_attempt': 0, 'delivery_report_q': None, 'partition': None }, { 'partition_key': b'test_key', 'compression_type': 0, 'value': b'this is also a test message', 'offset': 1, 'partition_id': 0, 'produce_attempt': 0, 'delivery_report_q': None, 'partition': None }, { 'partition_key': None, 'compression_type': 0, 'value': b"this doesn't have a partition key", 'offset': 2, 'partition_id': 0, 'produce_attempt': 0, 'delivery_report_q': None, 'partition': None }] def msg_to_dict(self, msg): """Helper to extract data from Message slots""" attr_names = protocol.Message.__slots__ f = operator.attrgetter(*attr_names) return dict(zip(attr_names, f(msg))) def test_request(self): preq = protocol.PartitionFetchRequest(b'test', 0, 1) req = protocol.FetchRequest(partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray(b'\x00\x00\x00;\x00\x01\x00\x00\x00\x00\x00\x00\x00\x07pykafka\xff\xff\xff\xff\x00\x00\x03\xe8\x00\x00\x04\x00\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x10\x00\x00') ) def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.FetchResponse( buffer(b'\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x006\xa3 ^B\x00\x00\x00\x00\x00\x12test_partition_key\x00\x00\x00\x16this is a test message') ) self.assertEqual(response.topics[b'test'][0].err, 3) def test_response(self): resp = protocol.FetchResponse( buffer(b'\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x006\xa3 ^B\x00\x00\x00\x00\x00\x12test_partition_key\x00\x00\x00\x16this is a test message') ) self.assertEqual(len(resp.topics[b'test'][0].messages), 1) self.assertEqual(resp.topics[b'test'][0].max_offset, 2) message = resp.topics[b'test'][0].messages[0] self.assertEqual(message.value, b'this is a test message') self.assertEqual(message.partition_key, b'test_partition_key') self.assertEqual(message.compression_type, 0) self.assertEqual(message.offset, 1) def test_gzip_decompression(self): msg = b'\x00\x00\x00\x01\x00\ttest_gzip\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x9b\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x8f\xbb\xe7\x1f\xb8\x00\x01\xff\xff\xff\xff\x00\x00\x00\x81\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x00c`\x80\x03\r\xbe.I\x7f0\x8b%\xb18%\rH\x8b\x95dd\x16+\x00Q\xa2BIjq\x89Bnjqqbz*T=#\x10\x1b\xb2\xf3\xcb\xf4\x81y\x1c \x15\xf1\xd9\xa9\x95@\xb64\\_Nq>v\xcdL@\xac\x7f\xb5(\xd9\x98\x81\xe1?\x10\x00y\x8a`M)\xf9\xa9\xc5y\xea%\n\x19\x89e\xa9@\x9d\x05\x89E%\x99%\x99\xf9y\n@\x93\x01N1\x9f[\xac\x00\x00\x00' response = protocol.FetchResponse(msg) for i in range(len(self.expected_data)): self.assertDictEqual( 
self.msg_to_dict(response.topics[b'test_gzip'][0].messages[i]), self.expected_data[i]) def test_snappy_decompression(self): msg = b'\x00\x00\x00\x01\x00\x0btest_snappy\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\xb5\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\xa9\xc1\xf2\xa3\xe1\x00\x02\xff\xff\xff\xff\x00\x00\x00\x9b\x82SNAPPY\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x87\xac\x01\x00\x00\x19\x01\x10(\x0e\x8a\x19O\x05\x0fx\x04asdf\x00\x00\x00\x16this is a test message\x05$(\x00\x00\x01\x00\x00\x001\x07\x0f\x1c\x8e\x05\x10\x00\x08\x01"\x1c_key\x00\x00\x00\x1b\x158\x08lsoV=\x00H\x02\x00\x00\x00/\xd5rc3\x00\x00\xff\xff\xff\xff\x00\x00\x00!\x055ldoesn\'t have a partition key' response = protocol.FetchResponse(msg) for i in range(len(self.expected_data)): self.assertDictEqual( self.msg_to_dict(response.topics[b'test_snappy'][0].messages[i]), self.expected_data[i]) class TestOffsetAPI(unittest2.TestCase): maxDiff = None def test_request(self): preq = protocol.PartitionOffsetRequest(b'test', 0, -1, 1) req = protocol.OffsetRequest(partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray(b'\x00\x00\x003\x00\x02\x00\x00\x00\x00\x00\x00\x00\x07pykafka\xff\xff\xff\xff\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x01') ) def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.OffsetResponse( buffer(b'\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02') ) self.assertEqual(response.topics[b'test'][0].err, 3) def test_response(self): resp = protocol.OffsetResponse( buffer(b'\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02') ) self.assertEqual(resp.topics[b'test'][0].offset, [2]) class TestOffsetCommitFetchAPI(unittest2.TestCase): maxDiff = None def test_consumer_metadata_request(self): req = protocol.ConsumerMetadataRequest(b'test') msg = req.get_bytes() self.assertEqual( msg, bytearray(b'\x00\x00\x00\x17\x00\n\x00\x00\x00\x00\x00\x00\x00\x07pykafka\x00\x04test') ) def test_consumer_metadata_response(self): response = protocol.ConsumerMetadataResponse( buffer(b'\x00\x00\x00\x00\x00\x00\x00\remmett-debian\x00\x00#\x84') ) self.assertEqual(response.coordinator_id, 0) self.assertEqual(response.coordinator_host, b'emmett-debian') self.assertEqual(response.coordinator_port, 9092) def test_offset_commit_request(self): preq = protocol.PartitionOffsetCommitRequest( b'test', 0, 68, 1426632066, b'testmetadata') req = protocol.OffsetCommitRequest( b'test', 1, b'pykafka', partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray(b'\x00\x00\x00T\x00\x08\x00\x01\x00\x00\x00\x00\x00\x07pykafka\x00\x04test\x00\x00\x00\x01\x00\x07pykafka\x00\x00\x00\x01\x00\x04test\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00D\x00\x00\x00\x00U\x08\xad\x82\x00\x0ctestmetadata') ) def test_offset_commit_response(self): response = protocol.OffsetCommitResponse( buffer(b'\x00\x00\x00\x01\x00\x0cemmett.dummy\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00') ) self.assertEqual(response.topics[b'emmett.dummy'][0].err, 0) def test_offset_fetch_request(self): preq = protocol.PartitionOffsetFetchRequest(b'testtopic', 0) req = protocol.OffsetFetchRequest(b'test', partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, 
bytearray(b'\x00\x00\x00.\x00\t\x00\x01\x00\x00\x00\x00\x00\x07pykafka\x00\x04test\x00\x00\x00\x01\x00\ttesttopic\x00\x00\x00\x01\x00\x00\x00\x00') ) def test_offset_fetch_response(self): response = protocol.OffsetFetchResponse( buffer(b'\x00\x00\x00\x01\x00\x0cemmett.dummy\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00') ) self.assertEqual(response.topics[b'emmett.dummy'][0].metadata, b'') self.assertEqual(response.topics[b'emmett.dummy'][0].offset, 1) if __name__ == '__main__': unittest2.main()
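
# Illustrative helper (not part of pykafka): unpack the common request header
# of the MetadataRequest bytes asserted in TestMetadataAPI.test_request, to
# show what the expected bytearray encodes. Kafka's wire format is big-endian:
# int32 size, int16 api_key, int16 api_version, int32 correlation_id, then a
# length-prefixed client id string.
import struct

def _decode_request_header(msg):
    """Return (size, api_key, api_version, correlation_id, client_id)."""
    size, api_key, api_version, correlation_id, client_id_len = \
        struct.unpack_from('>ihhih', msg, 0)
    client_id = bytes(msg[14:14 + client_id_len])
    return size, api_key, api_version, correlation_id, client_id

# For the MetadataRequest bytes above, this yields (21, 3, 0, 0, b'pykafka'):
# a 21-byte payload, api_key 3 (metadata), api_version 0, correlation id 0,
# and the default 'pykafka' client id, followed by an empty topic array.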
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events from the main output file and the log file.
"""
from __future__ import unicode_literals, division, print_function, absolute_import

import sys
import os.path
import datetime
import collections
import yaml
import six
import abc
import logging
import inspect
import numpy as np

from monty.string import indent, is_string, list_strings
from monty.fnmatch import WildCard
from monty.termcolor import colored
from monty.inspect import all_subclasses
from monty.json import MontyDecoder
from pymatgen.core import Structure
from monty.json import MSONable
from pymatgen.serializers.json_coders import pmg_serialize
from .abiinspect import YamlTokenizer

logger = logging.getLogger(__name__)

__all__ = [
    "EventsParser",
]


def straceback():
    """Returns a string with the traceback."""
    import traceback
    return traceback.format_exc()


class AbinitEvent(yaml.YAMLObject):
    """
    Example (YAML syntax)::

        Normal warning without any handler:

        --- !Warning
        message: |
            This is a normal warning that won't
            trigger any handler in the python code!
        src_file: routine_name
        src_line: 112
        ...

        Critical warning that will trigger some action in the python code.

        --- !ScfConvergeWarning
        message: |
            The human-readable message goes here!
        src_file: foo.F90
        src_line: 112
        tolname: tolwfr
        actual_tol: 1.0e-8
        required_tol: 1.0e-10
        nstep: 50
        ...

    The algorithm to extract the YAML sections is very simple.

    1) We use YamlTokenizer to extract the documents from the output file
    2) If we have a tag that ends with "Warning", "Error", "Bug", "Comment"
       we know we have encountered a new ABINIT event
    3) We parse the document with yaml.load(doc.text) and we get the object

    Note that:

        # --- and ... become reserved words (when they are placed at
          the beginning of a line) since they are used to mark the beginning and
          the end of YAML documents.

        # All the possible events should subclass `AbinitEvent` and define the
          class attribute yaml_tag so that yaml.load will know how to
          build the instance.
    """
    color = None

    def __init__(self, src_file, src_line, message):
        """
        Basic constructor for :class:`AbinitEvent`.

        Args:
            message: String with human-readable message providing info on the event.
            src_file: String with the name of the Fortran file where the event is raised.
            src_line: Integer giving the line number in src_file.
""" self.message = message self._src_file = src_file self._src_line = src_line #print("src_file", src_file, "src_line", src_line) @pmg_serialize def as_dict(self): return dict(message=self.message, src_file=self.src_file, src_line=self.src_line, yaml_tag=self.yaml_tag) @classmethod def from_dict(cls, d): cls = as_event_class(d.get("yaml_tag")) return cls(**{k: v for k,v in d.items() if k != "yaml_tag" and not k.startswith("@")}) @property def header(self): return "<%s at %s:%s>" % (self.name, self.src_file, self.src_line) def __repr__(self): return self.header def __str__(self): return "\n".join((self.header, self.message)) def __eq__(self, other): if other is None: return False return self.message == other.message def __ne__(self, other): return not self.__eq__(other) @property def src_file(self): """String with the name of the Fortran file where the event is raised.""" try: return self._src_file except AttributeError: return "Unknown" @property def src_line(self): """Integer giving the line number in src_file.""" try: return self._src_line except AttributeError: return "Unknown" @property def name(self): """Name of the event (class name)""" return self.__class__.__name__ @property def baseclass(self): """The baseclass of self.""" for cls in _BASE_CLASSES: if isinstance(self, cls): return cls raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__) def correct(self, task): """ This method is called when an error is detected in a :class:`Task` It should perform any corrective measures relating to the detected error. The idea is similar to the one used in custodian but the handler receives a :class:`Task` object so that we have access to its methods. Returns: (dict) JSON serializable dict that describes the errors and actions taken. E.g. {"errors": list_of_errors, "actions": list_of_actions_taken}. If this is an unfixable error, actions should be set to None. """ return 0 class AbinitComment(AbinitEvent): """Base class for Comment events""" yaml_tag = '!COMMENT' color = "blue" class AbinitError(AbinitEvent): """Base class for Error events""" yaml_tag = '!ERROR' color = "red" class AbinitYamlError(AbinitError): """ Raised if the YAML parser cannot parse the document and the doc tag is an Error. It's an AbinitError because the msg produced by the code is not valid YAML! """ class AbinitBug(AbinitEvent): """Base class for Bug events""" yaml_tag = '!BUG' color = "red" class AbinitWarning(AbinitEvent): """ Base class for Warning events (the most important class). Developers should subclass this class to define the different exceptions raised by the code and the possible actions that can be performed. """ yaml_tag = '!WARNING' color = None class AbinitCriticalWarning(AbinitWarning): color = "red" class AbinitYamlWarning(AbinitCriticalWarning): """ Raised if the YAML parser cannot parse the document and the doc tas is a Warning. """ # Warnings that trigger restart. class ScfConvergenceWarning(AbinitCriticalWarning): """Warning raised when the GS SCF cycle did not converge.""" yaml_tag = '!ScfConvergenceWarning' class NscfConvergenceWarning(AbinitCriticalWarning): """Warning raised when the GS NSCF cycle did not converge.""" yaml_tag = '!NscfConvergenceWarning' class RelaxConvergenceWarning(AbinitCriticalWarning): """Warning raised when the structural relaxation did not converge.""" yaml_tag = '!RelaxConvergenceWarning' # TODO: for the time being we don't discern between GS and PhononCalculations. 
#class PhononConvergenceWarning(AbinitCriticalWarning): # """Warning raised when the phonon calculation did not converge.""" # yaml_tag = u'!PhononConvergenceWarning' class QPSConvergenceWarning(AbinitCriticalWarning): """Warning raised when the QPS iteration (GW) did not converge.""" yaml_tag = '!QPSConvergenceWarning' class HaydockConvergenceWarning(AbinitCriticalWarning): """Warning raised when the Haydock method (BSE) did not converge.""" yaml_tag = '!HaydockConvergenceWarning' # Error classes providing a correct method. # Register the concrete base classes. _BASE_CLASSES = [ AbinitComment, AbinitError, AbinitBug, AbinitWarning, ] class EventReport(collections.Iterable, MSONable): """ Iterable storing the events raised by an ABINIT calculation. Attributes:: stat: information about a file as returned by os.stat """ def __init__(self, filename, events=None): """ List of ABINIT events. Args: filename: Name of the file events: List of Event objects """ self.filename = os.path.abspath(filename) self.stat = os.stat(self.filename) self.start_datetime, self.end_datetime = None, None self._events = [] self._events_by_baseclass = collections.defaultdict(list) if events is not None: for ev in events: self.append(ev) def __len__(self): return len(self._events) def __iter__(self): return self._events.__iter__() def __getitem__(self, slice): return self._events[slice] def __str__(self): #has_colours = stream_has_colours(stream) has_colours = True lines = [] app = lines.append app("Events found in %s\n" % self.filename) for i, event in enumerate(self): if has_colours: app("[%d] %s" % (i+1, colored(event.header, color=event.color))) app(indent(event.message, 4)) else: app("[%d] %s" % (i+1, str(event))) app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s\n" % ( self.num_errors, self.num_warnings, self.num_comments, self.run_completed)) return "\n".join(lines) def append(self, event): """Add an event to the list.""" self._events.append(event) self._events_by_baseclass[event.baseclass].append(event) def set_run_completed(self, boolean, start_datetime, end_datetime): """Set the value of _run_completed.""" self._run_completed = boolean if (start_datetime, end_datetime) != (None, None): # start_datetime: Sat Feb 28 23:54:27 2015 # end_datetime: Sat Feb 28 23:54:30 2015 try: fmt = "%a %b %d %H:%M:%S %Y" self.start_datetime = datetime.datetime.strptime(start_datetime, fmt) self.end_datetime = datetime.datetime.strptime(end_datetime, fmt) except Exception as exc: # Maybe LOCALE != en_US logger.warning(str(exc)) @property def run_etime(self): """Wall-time of the run as `timedelta` object.""" if self.start_datetime is None or self.end_datetime is None: return None return self.end_datetime - self.start_datetime @property def run_completed(self): """True if the calculation terminated.""" try: return self._run_completed except AttributeError: return False @property def comments(self): """List of comments found.""" return self.select(AbinitComment) @property def errors(self): """List of errors + bugs found.""" return self.select(AbinitError) + self.select(AbinitBug) @property def warnings(self): """List of warnings found.""" return self.select(AbinitWarning) @property def num_warnings(self): """Number of warnings reported.""" return len(self.warnings) @property def num_errors(self): """Number of errors reported.""" return len(self.errors) @property def num_comments(self): """Number of comments reported.""" return len(self.comments) def select(self, base_class): """ Return the list of events that 
        inherit from class base_class
        """
        return self._events_by_baseclass[base_class]

    def filter_types(self, event_types):
        events = []
        for ev in self:
            if type(ev) in event_types: events.append(ev)
        return self.__class__(filename=self.filename, events=events)

    def get_events_of_type(self, event_class):
        """Return a list of events of the given class."""
        return [ev for ev in self if type(ev) == event_class]

    @pmg_serialize
    def as_dict(self):
        return dict(filename=self.filename, events=[e.as_dict() for e in self._events])

    @classmethod
    def from_dict(cls, d):
        return cls(filename=d["filename"], events=[AbinitEvent.from_dict(e) for e in d["events"]])


class EventsParserError(Exception):
    """Base class for the exceptions raised by :class:`EventsParser`."""


class EventsParser(object):
    """
    Parses the output or the log file produced by ABINIT and extracts the list of events.
    """
    Error = EventsParserError

    def parse(self, filename, verbose=0):
        """
        Parse the given file. Return :class:`EventReport`.
        """
        run_completed, start_datetime, end_datetime = False, None, None
        filename = os.path.abspath(filename)
        report = EventReport(filename)

        # TODO Use CamelCase for the Fortran messages.
        # Bug is still an error of class SoftwareError
        w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")

        with YamlTokenizer(filename) as tokens:
            for doc in tokens:
                if w.match(doc.tag):
                    #print("got doc.tag", doc.tag,"--")
                    try:
                        #print(doc.text)
                        event = yaml.load(doc.text)
                        #print(event.yaml_tag, type(event))
                    except Exception:
                        #raise
                        # Wrong YAML doc. Check the doc tag and instantiate the proper event.
                        message = "Malformed YAML document at line: %d\n" % doc.lineno
                        message += doc.text

                        # This call is very expensive when we have many exceptions due to malformed YAML docs.
                        if verbose:
                            message += "Traceback:\n %s" % straceback()

                        if "error" in doc.tag.lower():
                            print("It seems an error", doc.tag)
                            event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
                        else:
                            event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)

                    event.lineno = doc.lineno
                    report.append(event)

                # Check whether the calculation completed.
                if doc.tag == "!FinalSummary":
                    run_completed = True
                    d = doc.as_dict()
                    start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]

        report.set_run_completed(run_completed, start_datetime, end_datetime)
        return report

    def report_exception(self, filename, exc):
        """
        This method is used when self.parser raises an Exception so that
        we can report a customized :class:`EventReport` object with info on the exception.
        """
        # Build fake event.
        event = AbinitError(src_file="Unknown", src_line=0, message=str(exc))
        return EventReport(filename, events=[event])


class EventHandler(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract base class defining the interface for an EventHandler.

    The ``__init__`` should always provide default values for its arguments so
    that we can easily instantiate the handlers with:

        handlers = [cls() for cls in get_event_handler_classes()]

    The default values should be chosen so as to cover the most typical cases.

    Each EventHandler should define the class attribute `can_change_physics`
    that is true if the handler changes `important` parameters of the
    run that are tightly connected to the physics of the system.

    For example, an `EventHandler` that changes the value of `dilatmx` and
    prepares the restart is not changing the physics. The same is true for a
    handler that changes the mixing algorithm.
    On the contrary, a handler that changes the value of the smearing is
    modifying an important physical parameter, and the user should be made
    aware of this so that there's an explicit agreement between the user and
    the code.

    The default handlers are those that do not change the physics; other
    handlers can be installed by the user when constructing the flow (TODO).

    .. warning::

        The EventHandler should perform any action at the level of the input
        files needed to solve the problem and then prepare the task for a new
        submission. The handler should never try to resubmit the task. The
        submission must be delegated to the scheduler or Fireworks.
    """

    event_class = AbinitEvent
    """AbinitEvent subclass associated to this handler."""

    #can_change_physics

    FIXED = 1
    NOT_FIXED = 0

    @classmethod
    def cls2str(cls):
        lines = []
        app = lines.append

        ecls = cls.event_class
        app("event name = %s" % ecls.yaml_tag)
        app("event documentation: ")
        lines.extend(ecls.__doc__.split("\n"))
        app("handler documentation: ")
        lines.extend(cls.__doc__.split("\n"))

        return "\n".join(lines)

    def __str__(self):
        return "<%s>" % self.__class__.__name__

    def can_handle(self, event):
        """True if this handler is associated to the given :class:`AbinitEvent`"""
        return self.event_class == event.__class__

    # TODO: define CorrectionRecord object and provide helper functions to build it

    def count(self, task):
        """
        Return the number of times the event associated to this handler
        has been already fixed in the :class:`Task`.
        """
        # Corrections store the event as a serialized dict, so compare against the class name.
        return len([c for c in task.corrections if c["event"]["@class"] == self.event_class.__name__])

    @abc.abstractmethod
    def handle_task_event(self, task, event):
        """
        Method to handle Abinit events.

        Args:
            task: :class:`Task` object.
            event: :class:`AbinitEvent` found in the log file.

        Return:
            0 if no action has been applied, 1 if the problem has been fixed.
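
        Example (illustrative sketch only -- ``MyError`` is a hypothetical
        event class; ``_set_inpvars`` and ``log_correction`` are the Task
        methods used by the concrete handlers in this module)::

            class MyErrorHandler(ErrorHandler):
                event_class = MyError
                can_change_physics = False

                def handle_task_event(self, task, event):
                    task._set_inpvars(nstep=100)
                    task.log_correction(event, "Increased nstep to 100")
                    return self.FIXED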
""" @pmg_serialize def as_dict(self): #@Guido this introspection is nice but it's not safe d = {} if hasattr(self, "__init__"): for c in inspect.getargspec(self.__init__).args: if c != "self": d[c] = self.__getattribute__(c) return d @classmethod def from_dict(cls, d): kwargs = {k: v for k, v in d.items() if k in inspect.getargspec(cls.__init__).args} return cls(**kwargs) @classmethod def compare_inputs(cls, new_input, old_input): def vars_dict(d): """ make a simple dictionary and convert numpy arrays to lists """ new_d = {} for key, value in d.items(): if isinstance(value, np.ndarray): value = value.tolist() new_d[key] = value return new_d new_vars = vars_dict(new_input) old_vars = vars_dict(old_input) new_keys = set(new_vars.keys()) old_keys = set(old_vars.keys()) intersect = new_keys.intersection(old_keys) added_keys = new_keys - intersect removed_keys = old_keys - intersect changed_keys = set(v for v in intersect if new_vars[v] != old_vars[v]) log_diff = {} if added_keys: log_diff['_set'] = {k: new_vars[k] for k in added_keys} if changed_keys: log_diff['_update'] = ({k: {'new': new_vars[k], 'old': old_vars[k]} for k in changed_keys}) if new_input.structure != old_input.structure: log_diff['_change_structure'] = new_input.structure.as_dict() if removed_keys: log_diff['_pop'] = {k: old_vars[k] for k in removed_keys} return log_diff class Correction(MSONable): def __init__(self, handler, actions, event, reset=False): self.handler = handler self.actions = actions self.event = event self.reset = reset @pmg_serialize def as_dict(self): return dict(handler=self.handler.as_dict(), actions=self.actions, event=self.event.as_dict(), reset=self.reset) @classmethod def from_dict(cls, d): dec = MontyDecoder() return cls(handler=dec.process_decoded(d['handler']), actions=d['actions'], event=dec.process_decoded(d['event']), reset=d['reset']) #class WarningHandler(EventHandler): # """Base class for handlers associated to ABINIT warnings.""" # event_class = AbinitWarning # #class BugHandler(EventHandler): # """Base class for handlers associated to ABINIT bugs.""" # event_class = AbinitBug class ErrorHandler(EventHandler): """Base class for handlers associated to ABINIT errors.""" event_class = AbinitError _ABC_EVHANDLER_CLASSES = set([ErrorHandler,]) # Public API def autodoc_event_handlers(stream=sys.stdout): """ Print to the given string, the documentation for the events and the associated handlers. """ lines = [] for cls in all_subclasses(EventHandler): if cls in _ABC_EVHANDLER_CLASSES: continue event_class = cls.event_class lines.extend(cls.cls2str().split("\n")) # Here we enforce the abstract protocol of the class # The unit test in tests_events will detect the problem. if not hasattr(cls, "can_change_physics"): raise RuntimeError("%s: can_change_physics must be defined" % cls) stream.write("\n".join(lines) + "\n") def get_event_handler_classes(categories=None): """Return the list of handler classes.""" classes = [c for c in all_subclasses(EventHandler) if c not in _ABC_EVHANDLER_CLASSES] return classes def as_event_class(obj): """ Convert obj into a subclass of AbinitEvent. obj can be either a class or a string with the class name or the YAML tag """ if is_string(obj): for c in all_subclasses(AbinitEvent): if c.__name__ == obj or c.yaml_tag == obj: return c raise ValueError("Cannot find event class associated to %s" % obj) # Assume class. 
    assert obj in all_subclasses(AbinitEvent)
    return obj


############################################
########## Concrete classes ################
############################################

class DilatmxError(AbinitError):
    """
    This Error occurs in variable cell calculations when the increase in the
    unit cell volume is too large.
    """
    yaml_tag = '!DilatmxError'

    #def correct(self, task):
    #    #Idea: decrease dilatmx and restart from the last structure.
    #    #We would like to end up with a structure optimized with dilatmx 1.01
    #    #that will be used for phonon calculations.
    #    if not self.enabled:
    #        task.log_correction(self, "Handler for %s has been disabled")
    #        return 1 # what?

    #    # Read the last structure dumped by ABINIT before aborting.
    #    print("in dilatmx")
    #    filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
    #    last_structure = Structure.from_file(filepath)
    #    task._change_structure(last_structure)
    #    #changes = task._modify_vars(dilatmx=1.05)

    #    action = "Take last structure from DILATMX_STRUCT.nc, will restart with dilatmx: %s" % task.get_inpvar("dilatmx")
    #    task.log_correction(self, action)
    #    return 1


class DilatmxErrorHandler(ErrorHandler):
    """
    Handle DilatmxError. Abinit produces a netcdf file with the last structure
    before aborting. The handler changes the structure in the input with the
    last configuration and modifies the value of dilatmx.
    """
    event_class = DilatmxError

    can_change_physics = False

    def __init__(self, max_dilatmx=1.3):
        self.max_dilatmx = max_dilatmx

    def handle_task_event(self, task, event):
        # Read the last structure dumped by ABINIT before aborting.
        filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
        last_structure = Structure.from_file(filepath)

        task._change_structure(last_structure)

        #read the suggested dilatmx
        # new_dilatmx = 1.05
        # if new_dilatmx > self.max_dilatmx:
        #     msg = "Suggested dilatmx ({}) exceeds maximum configured value ({}).".format(new_dilatmx, self.max_dilatmx)
        #     return self.NOT_FIXED
        # task.strategy.abinit_input.set_vars(dilatmx=new_dilatmx)
        msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
        task.log_correction(event, msg)

        # Note that we change the structure but we don't try to restart from the previous WFK|DEN file
        # because Abinit called mpi_abort and therefore no final WFK|DEN file has been produced.
        return self.FIXED

    def handle_input_event(self, abiinput, outdir, event):
        try:
            old_abiinput = abiinput.deepcopy()
            # Read the last structure dumped by ABINIT before aborting.
            filepath = outdir.has_abiext("DILATMX_STRUCT.nc")
            last_structure = Structure.from_file(filepath)
            abiinput.set_structure(last_structure)
            #FIXME restart from DEN files not always working with interpolation
            return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, True)
            # return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, False)
        except Exception as exc:
            logger.warning('Error while trying to apply the handler {}: {}'.format(self, exc))
            return None


"""
class DilatmxErrorHandlerTest(ErrorHandler):

    def __init__(self, max_dilatmx=1.3):
        self.max_dilatmx = max_dilatmx

    def handle_task_event(self, task, event):
        msg = event.message

        # Check if the handler is suitable to deal with this error
        if msg.find("You need at least dilatmx=") == -1:
            return {"status": self.NOT_FIXED,
                    "msg": "{} can not fix event: {}".format(self.__class__, event)}

        #read the suggested dilatmx
        try:
            new_dilatmx = float(msg.split('dilatmx=')[1].split('\n')[0].strip())
        except Exception:
            return {"status": self.NOT_FIXED, "msg": "Couldn't parse dilatmx."}

        if new_dilatmx > self.max_dilatmx:
            msg = "Suggested dilatmx ({}) exceeds maximum configured value ({}).".format(new_dilatmx, self.max_dilatmx)
            return self.NOT_FIXED

        task.strategy.abinit_input.set_vars(dilatmx=new_dilatmx)

        msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
        task.log_correction(event, msg)
        return self.FIXED
"""


class TolSymError(AbinitError):
    """
    Class of errors raised by Abinit when it cannot detect the symmetries of the system.
    The handler assumes the structure makes sense and the error is just due to numerical inaccuracies.
    We increase the value of tolsym in the input file (default 1e-8) so that
    Abinit can find the space group and re-symmetrize the input structure.
    """
    yaml_tag = '!TolSymError'


class TolSymErrorHandler(ErrorHandler):
    """
    Increase the value of tolsym in the input file.
    """
    event_class = TolSymError

    can_change_physics = False

    def __init__(self, max_nfixes=3):
        self.max_nfixes = max_nfixes

    def handle_task_event(self, task, event):
        # TODO: Add limit on the number of fixes one can do for the same error
        # For example in this case, the scheduler will stop after 20 submissions
        if self.count(task) > self.max_nfixes:
            return self.NOT_FIXED

        old_tolsym = task.get_inpvar("tolsym")
        new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
        task._set_inpvars(tolsym=new_tolsym)

        task.log_correction(event, "Increasing tolsym from %s to %s" % (old_tolsym, new_tolsym))
        return self.FIXED

    def handle_input_event(self, abiinput, outdir, event):
        try:
            old_abiinput = abiinput.deepcopy()
            old_tolsym = abiinput["tolsym"]
            new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
            abiinput.set_vars(tolsym=new_tolsym)
            return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, False)
        except Exception as exc:
            logger.warning('Error while trying to apply the handler {}: {}'.format(self, exc))
            return None
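

# Usage sketch (illustrative only; "run.abo" is a placeholder path for an
# ABINIT main output or log file):
if __name__ == "__main__":
    parser = EventsParser()
    report = parser.parse("run.abo")
    print(report)
    # EventReport supports filtering by concrete event type.
    for warning in report.filter_types([ScfConvergenceWarning]):
        print("SCF did not converge:", warning.header)
    print("errors:", report.num_errors, "completed:", report.run_completed)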
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse

from ..categories import category_value
from ..models import Rating
from ..templatetags.pinax_ratings_tags import (
    overall_rating,
    rating_count,
    ratings,
    user_rating,
    user_rating_js,
    user_rating_url,
)
from .models import Car
from .test import TestCase


class TemplateTagsTest(TestCase):
    """
    Tests for the template tags
    """

    def setUp(self):
        self.handling = "handling"
        self.speed = "speed"
        self.hamilton = self.make_user(username="lewis_hamilton")
        self.schumacher = self.make_user(username="michael_schumacher")
        self.unrated_object = self.make_user(username="unrated_user")
        self.benz = Car.objects.create(name="Mercedes c200")

    def create_rating(self, user, rating, obj=None, category=""):
        """
        Helper function to record a rating for the benz object of
        type Car used in the tests

        :param user: User
        :param rating: int
        :param obj: obj
        :param category: str
        :return: None
        """
        if not obj:
            obj = self.benz
        cat_choice = category_value(obj, category)
        if not cat_choice:
            cat_choice = ""
        Rating.update(
            rating_object=obj,
            user=user,
            category=cat_choice,
            rating=rating
        )

    def test_user_rating_with_category(self):
        """
        Ensure `user_rating` returns the rating posted by the specified user.
        """
        # ratings for handling, ensure they are distinct
        self.create_rating(self.hamilton, 3, category=self.handling)
        self.create_rating(self.schumacher, 4, category=self.handling)
        self.assertEqual(user_rating(self.hamilton, self.benz, self.handling), 3)
        self.assertEqual(user_rating(self.schumacher, self.benz, self.handling), 4)

        # ratings for speed, different from handling
        self.create_rating(self.hamilton, 5, category=self.speed)
        self.create_rating(self.schumacher, 2, category=self.speed)
        self.assertEqual(user_rating(self.hamilton, self.benz, self.speed), 5)
        self.assertEqual(user_rating(self.schumacher, self.benz, self.speed), 2)

    def test_user_rating_on_unrated_object(self):
        """
        Ensure zero is returned from `user_rating` for object without a rating.
        """
        self.assertEqual(user_rating(self.hamilton, self.unrated_object, self.speed), 0)
        # Same check, no category
        self.assertEqual(user_rating(self.hamilton, self.unrated_object), 0)

    def test_user_rating_no_category(self):
        """
        Ensure `user_rating` returns the average of all ratings for object
        by user when no category is specified.
        """
        # Create first rating, return value should be same
        self.create_rating(self.schumacher, 5, category=self.handling)
        self.assertEqual(user_rating(self.schumacher, self.benz), 5)  # == (5) / 1

        # Add second rating in different category, should be averaged with first
        self.create_rating(self.schumacher, 3, category=self.speed)
        self.assertEqual(user_rating(self.schumacher, self.benz), 4)  # == (5 + 3) / 2

        # Add third rating with no category, should be averaged with first two ratings
        self.create_rating(self.schumacher, 1)
        self.assertEqual(user_rating(self.schumacher, self.benz), 3)  # == (5 + 3 + 1) / 3

    def test_user_rating_revised(self):
        """
        Ensure `user_rating` returns the latest rating for a category.
        """
        # Create a rating, then revise it; the latest value should win
        self.create_rating(self.schumacher, 5, category=self.handling)
        self.create_rating(self.schumacher, 2, category=self.handling)
        self.assertEqual(user_rating(self.schumacher, self.benz), 2)

    def test_overall_rating_tag_with_category(self):
        """
        Ensure `overall_rating` returns an average rating for a specified category.
""" self.create_rating(self.schumacher, 5, category=self.handling) self.create_rating(self.hamilton, 1, category=self.handling) self.assertEqual(overall_rating(self.benz, self.handling), 3) # Add rating for a different category self.create_rating(self.schumacher, 5, category=self.speed) # Overall "handling" rating should be same as before self.assertEqual(overall_rating(self.benz, self.handling), 3) def test_overall_rating_on_unrated_object(self): """ Ensure zero is returned from `overall_rating` for object without a rating. """ self.assertEqual(overall_rating(self.unrated_object, self.speed), 0) # Same check, no category self.assertEqual(overall_rating(self.unrated_object), 0) def test_overall_rating_tag_with_no_category(self): """ Ensure `overall_rating` returns the average of all ratings for object by user when no category is specified. """ # Create first rating, return value should be same self.create_rating(self.schumacher, 5, category=self.handling) self.assertEqual(overall_rating(self.benz), 5) # == (5) / 1 # Add second rating in different category, should be averaged with first self.create_rating(self.schumacher, 3, category=self.speed) self.assertEqual(overall_rating(self.benz), 4) # == (5 + 3) / 2 # Add third rating with no category, should be averaged with first two ratings self.create_rating(self.schumacher, 1) self.assertEqual(overall_rating(self.benz), 3) # == (5 + 3 + 1) / 3 def test_ratings_tag(self): """ Ensure QuerySet of all Ratings for self.benz is returned """ self.create_rating(self.schumacher, 5) self.create_rating(self.hamilton, 5) content_type = ContentType.objects.get_for_model(self.benz) output = ratings(self.benz) expected = Rating.objects.filter( content_type=content_type, object_id=self.benz.pk ) self.assertEqual(len(expected), 2) self.assertSetEqual(set(output), set(expected)) def test_ratings_tag_with_not_rated_object(self): """ Ensure empty list is returned for object without Ratings """ self.assertEqual(ratings(self.unrated_object), []) def test_user_rating_url_tag(self): """ Ensure `user_rating_url` returns correct URL for user to post a rating """ tag_url = user_rating_url(self.hamilton, self.benz) expected_path = reverse( "pinax_ratings:rate", kwargs={ "content_type_id": ContentType.objects.get_for_model(self.benz).pk, "object_id": self.benz.pk }) self.assertEqual(tag_url, expected_path) def test_rating_count_tag(self): """ Ensure `rating_count` returns the number of ratings on an object regardless of who rated and regardless of category. """ self.create_rating(self.schumacher, 5) self.create_rating(self.schumacher, 5, category=self.speed) self.create_rating(self.schumacher, 5, obj=self.unrated_object) # should not be included self.create_rating(self.hamilton, 5) self.create_rating(self.hamilton, 5, category=self.handling) count = rating_count(self.benz) self.assertEqual(count, 4) def test_user_rating_js_tag(self): """ Ensure the correct context is returned """ self.create_rating(self.schumacher, 5, category=self.speed) context = user_rating_js(self.schumacher, self.benz, self.speed) self.assertEqual(context["obj"], self.benz) self.assertEqual(context["category"], self.speed) self.assertEqual(context["the_user_rating"], 5) self.assertEqual(context["STATIC_URL"], settings.STATIC_URL) self.assertEqual( context["post_url"], reverse( "pinax_ratings:rate", kwargs={ "content_type_id": ContentType.objects.get_for_model(self.benz).pk, "object_id": self.benz.pk } ))
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from oslo.utils import strutils from trove.common import exception from trove.common import pagination from trove.common import wsgi from trove.common.utils import correct_id_with_req from trove.extensions.mysql.common import populate_validated_databases from trove.extensions.mysql.common import populate_users from trove.extensions.mysql.common import unquote_user_host from trove.extensions.mysql import models from trove.extensions.mysql import views from trove.guestagent.db import models as guest_models from trove.openstack.common import log as logging from trove.common.i18n import _ import trove.common.apischema as apischema LOG = logging.getLogger(__name__) class RootController(wsgi.Controller): """Controller for instance functionality.""" def index(self, req, tenant_id, instance_id): """Returns True if root is enabled for the given instance; False otherwise. """ LOG.info(_("Getting root enabled for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] is_root_enabled = models.Root.load(context, instance_id) return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200) def create(self, req, tenant_id, instance_id): """Enable the root user for the db instance.""" LOG.info(_("Enabling root for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] user_name = context.user root = models.Root.create(context, instance_id, user_name) return wsgi.Result(views.RootCreatedView(root).data(), 200) class UserController(wsgi.Controller): """Controller for instance functionality.""" schemas = apischema.user @classmethod def get_schema(cls, action, body): action_schema = super(UserController, cls).get_schema(action, body) if 'update_all' == action: update_type = body.keys()[0] action_schema = action_schema.get(update_type, {}) return action_schema def index(self, req, tenant_id, instance_id): """Return all users.""" LOG.info(_("Listing users for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] users, next_marker = models.Users.load(context, instance_id) view = views.UsersView(users) paged = pagination.SimplePaginatedDataView(req.url, 'users', view, next_marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id, instance_id): """Creates a set of users.""" LOG.info(_("Creating users for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req)) LOG.info(_("body : '%s'\n\n") % strutils.mask_password(body)) context = req.environ[wsgi.CONTEXT_KEY] users = body['users'] try: model_users = populate_users(users) models.User.create(context, instance_id, model_users) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) return wsgi.Result(None, 202) def delete(self, req, tenant_id, instance_id, id): LOG.info(_("Deleting user for instance '%s'") % 
instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] id = correct_id_with_req(id, req) username, host = unquote_user_host(id) user = None try: user = guest_models.MySQLUser() user.name = username user.host = host found_user = models.User.load(context, instance_id, username, host) if not found_user: user = None except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) if not user: raise exception.UserNotFound(uuid=id) models.User.delete(context, instance_id, user.serialize()) return wsgi.Result(None, 202) def show(self, req, tenant_id, instance_id, id): """Return a single user.""" LOG.info(_("Showing a user for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] id = correct_id_with_req(id, req) username, host = unquote_user_host(id) user = None try: user = models.User.load(context, instance_id, username, host) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) if not user: raise exception.UserNotFound(uuid=id) view = views.UserView(user) return wsgi.Result(view.data(), 200) def update(self, req, body, tenant_id, instance_id, id): """Change attributes for one user.""" LOG.info(_("Updating user attributes for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req)) context = req.environ[wsgi.CONTEXT_KEY] id = correct_id_with_req(id, req) username, hostname = unquote_user_host(id) user = None user_attrs = body['user'] try: user = models.User.load(context, instance_id, username, hostname) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) if not user: raise exception.UserNotFound(uuid=id) try: models.User.update_attributes(context, instance_id, username, hostname, user_attrs) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) return wsgi.Result(None, 202) def update_all(self, req, body, tenant_id, instance_id): """Change the password of one or more users.""" LOG.info(_("Updating user passwords for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req)) context = req.environ[wsgi.CONTEXT_KEY] users = body['users'] model_users = [] for user in users: try: mu = guest_models.MySQLUser() mu.name = user['name'] mu.host = user.get('host') mu.password = user['password'] found_user = models.User.load(context, instance_id, mu.name, mu.host) if not found_user: user_and_host = mu.name if mu.host: user_and_host += '@' + mu.host raise exception.UserNotFound(uuid=user_and_host) model_users.append(mu) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) models.User.change_password(context, instance_id, model_users) return wsgi.Result(None, 202) class UserAccessController(wsgi.Controller): """Controller for adding and removing database access for a user.""" schemas = apischema.user @classmethod def get_schema(cls, action, body): schema = {} if 'update_all' == action: schema = cls.schemas.get(action).get('databases') return schema def _get_user(self, context, instance_id, user_id): username, hostname = unquote_user_host(user_id) try: user = models.User.load(context, instance_id, username, hostname) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) if not user: raise exception.UserNotFound(uuid=user_id) return user def index(self, req, tenant_id, instance_id, user_id): """Show permissions for the given user.""" LOG.info(_("Showing user access for instance '%s'") % instance_id) 
LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] # Make sure this user exists. user_id = correct_id_with_req(user_id, req) user = self._get_user(context, instance_id, user_id) if not user: LOG.error(_("No such user: %(user)s ") % {'user': user}) raise exception.UserNotFound(uuid=user) username, hostname = unquote_user_host(user_id) access = models.User.access(context, instance_id, username, hostname) view = views.UserAccessView(access.databases) return wsgi.Result(view.data(), 200) def update(self, req, body, tenant_id, instance_id, user_id): """Grant access for a user to one or more databases.""" LOG.info(_("Granting user access for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] user_id = correct_id_with_req(user_id, req) user = self._get_user(context, instance_id, user_id) if not user: LOG.error(_("No such user: %(user)s ") % {'user': user}) raise exception.UserNotFound(uuid=user) username, hostname = unquote_user_host(user_id) databases = [db['name'] for db in body['databases']] models.User.grant(context, instance_id, username, hostname, databases) return wsgi.Result(None, 202) def delete(self, req, tenant_id, instance_id, user_id, id): """Revoke access for a user.""" LOG.info(_("Revoking user access for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] user_id = correct_id_with_req(user_id, req) user = self._get_user(context, instance_id, user_id) if not user: LOG.error(_("No such user: %(user)s ") % {'user': user}) raise exception.UserNotFound(uuid=user) username, hostname = unquote_user_host(user_id) access = models.User.access(context, instance_id, username, hostname) databases = [db.name for db in access.databases] if id not in databases: raise exception.DatabaseNotFound(uuid=id) models.User.revoke(context, instance_id, username, hostname, id) return wsgi.Result(None, 202) class SchemaController(wsgi.Controller): """Controller for instance functionality.""" schemas = apischema.dbschema def index(self, req, tenant_id, instance_id): """Return all schemas.""" LOG.info(_("Listing schemas for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] schemas, next_marker = models.Schemas.load(context, instance_id) view = views.SchemasView(schemas) paged = pagination.SimplePaginatedDataView(req.url, 'databases', view, next_marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id, instance_id): """Creates a set of schemas.""" LOG.info(_("Creating schema for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) LOG.info(_("body : '%s'\n\n") % body) context = req.environ[wsgi.CONTEXT_KEY] schemas = body['databases'] model_schemas = populate_validated_databases(schemas) models.Schema.create(context, instance_id, model_schemas) return wsgi.Result(None, 202) def delete(self, req, tenant_id, instance_id, id): LOG.info(_("Deleting schema for instance '%s'") % instance_id) LOG.info(_("req : '%s'\n\n") % req) context = req.environ[wsgi.CONTEXT_KEY] try: schema = guest_models.ValidatedMySQLDatabase() schema.name = id models.Schema.delete(context, instance_id, schema.serialize()) except (ValueError, AttributeError) as e: raise exception.BadRequest(msg=str(e)) return wsgi.Result(None, 202) def show(self, req, tenant_id, instance_id, id): raise webob.exc.HTTPNotImplemented()
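

# Illustrative request-body sketch (values are placeholders) for the
# `update_all` action handled above; each entry supplies the fields read by
# UserController.update_all ("name", optional "host", "password"):
#
#   {"users": [{"name": "app_user", "host": "%", "password": "new_secret"}]}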
# sqlalchemy/pool.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Connection pooling for DB-API connections. Provides a number of connection pool implementations for a variety of usage scenarios and thread behavior requirements imposed by the application, DB-API or database itself. Also provides a DB-API 2.0 connection proxying mechanism allowing regular DB-API connect() methods to be transparently managed by a SQLAlchemy connection pool. """ import time import traceback import weakref from . import exc, log, event, events, interfaces, util from .util import queue as sqla_queue from .util import threading, memoized_property, \ chop_traceback proxies = {} def manage(module, **params): """Return a proxy for a DB-API module that automatically pools connections. Given a DB-API 2.0 module and pool management parameters, returns a proxy for the module that will automatically pool connections, creating new connection pools for each distinct set of connection arguments sent to the decorated module's connect() function. :param module: a DB-API 2.0 database module :param poolclass: the class used by the pool module to provide pooling. Defaults to :class:`.QueuePool`. :param \*\*params: will be passed through to *poolclass* """ try: return proxies[module] except KeyError: return proxies.setdefault(module, _DBProxy(module, **params)) def clear_managers(): """Remove all current DB-API 2.0 managers. All pools and connections are disposed. """ for manager in proxies.values(): manager.close() proxies.clear() reset_rollback = util.symbol('reset_rollback') reset_commit = util.symbol('reset_commit') reset_none = util.symbol('reset_none') class _ConnDialect(object): """partial implementation of :class:`.Dialect` which provides DBAPI connection methods. When a :class:`.Pool` is combined with an :class:`.Engine`, the :class:`.Engine` replaces this with its own :class:`.Dialect`. """ def do_rollback(self, dbapi_connection): dbapi_connection.rollback() def do_commit(self, dbapi_connection): dbapi_connection.commit() def do_close(self, dbapi_connection): dbapi_connection.close() class Pool(log.Identified): """Abstract base class for connection pools.""" _dialect = _ConnDialect() def __init__(self, creator, recycle=-1, echo=None, use_threadlocal=False, logging_name=None, reset_on_return=True, listeners=None, events=None, _dispatch=None, _dialect=None): """ Construct a Pool. :param creator: a callable function that returns a DB-API connection object. The function will be called with parameters. :param recycle: If set to non -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1. :param logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.pool" logger. Defaults to a hexstring of the object's id. :param echo: If True, connections being pulled and retrieved from the pool will be logged to the standard output, as well as pool sizing information. Echoing can also be achieved by enabling logging for the "sqlalchemy.pool" namespace. Defaults to False. 
        :param use_threadlocal: If set to True, repeated calls to
          :meth:`connect` within the same application thread will be
          guaranteed to return the same connection object, if one has
          already been retrieved from the pool and has not been
          returned yet.  Offers a slight performance advantage at the
          cost of individual transactions by default.  The
          :meth:`unique_connection` method is provided to bypass the
          threadlocal behavior installed into :meth:`connect`.

        :param reset_on_return: Configures the action to take
          on connections as they are returned to the pool.
          See the argument description in :class:`.QueuePool` for
          more detail.

        :param events: a list of 2-tuples, each of the form
          ``(callable, target)`` which will be passed to event.listen()
          upon construction.  Provided here so that event listeners
          can be assigned via ``create_engine`` before dialect-level
          listeners are applied.

        :param listeners: Deprecated.  A list of
          :class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
          connections are created, checked out and checked in to the
          pool.  This has been superseded by
          :func:`~sqlalchemy.event.listen`.

        """
        if logging_name:
            self.logging_name = self._orig_logging_name = logging_name
        else:
            self._orig_logging_name = None

        log.instance_logger(self, echoflag=echo)
        self._threadconns = threading.local()
        self._creator = creator
        self._recycle = recycle
        self._use_threadlocal = use_threadlocal
        if reset_on_return in ('rollback', True, reset_rollback):
            self._reset_on_return = reset_rollback
        elif reset_on_return in (None, False, reset_none):
            self._reset_on_return = reset_none
        elif reset_on_return in ('commit', reset_commit):
            self._reset_on_return = reset_commit
        else:
            raise exc.ArgumentError(
                "Invalid value for 'reset_on_return': %r"
                % reset_on_return)

        self.echo = echo
        if _dispatch:
            self.dispatch._update(_dispatch, only_propagate=False)
        if _dialect:
            self._dialect = _dialect

        if events:
            for fn, target in events:
                event.listen(self, target, fn)
        if listeners:
            util.warn_deprecated(
                "The 'listeners' argument to Pool (and "
                "create_engine()) is deprecated.  Use event.listen().")
            for l in listeners:
                self.add_listener(l)

    dispatch = event.dispatcher(events.PoolEvents)

    def _close_connection(self, connection):
        self.logger.debug("Closing connection %r", connection)
        try:
            self._dialect.do_close(connection)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            self.logger.debug("Exception closing connection %r", connection)

    @util.deprecated(
        2.7, "Pool.add_listener is deprecated.  Use event.listen()")
    def add_listener(self, listener):
        """Add a :class:`.PoolListener`-like object to this pool.

        ``listener`` may be an object that implements some or all of
        PoolListener, or a dictionary of callables containing implementations
        of some or all of the named methods in PoolListener.

        """
        interfaces.PoolListener._adapt_listener(self, listener)

    def unique_connection(self):
        """Produce a DBAPI connection that is not referenced by any
        thread-local context.

        This method is different from :meth:`.Pool.connect` only if the
        ``use_threadlocal`` flag has been set to ``True``.

        """
        return _ConnectionFairy(self).checkout()

    def _create_connection(self):
        """Called by subclasses to create a new ConnectionRecord."""
        return _ConnectionRecord(self)

    def recreate(self):
        """Return a new :class:`.Pool`, of the same class as this one
        and configured with identical creation arguments.

        This method is used in conjunction with :meth:`dispose`
        to close out an entire :class:`.Pool` and create a new one in
        its place.
""" raise NotImplementedError() def dispose(self): """Dispose of this pool. This method leaves the possibility of checked-out connections remaining open, as it only affects connections that are idle in the pool. See also the :meth:`Pool.recreate` method. """ raise NotImplementedError() def _replace(self): """Dispose + recreate this pool. Subclasses may employ special logic to move threads waiting on this pool to the new one. """ self.dispose() return self.recreate() def connect(self): """Return a DBAPI connection from the pool. The connection is instrumented such that when its ``close()`` method is called, the connection will be returned to the pool. """ if not self._use_threadlocal: return _ConnectionFairy(self).checkout() try: rec = self._threadconns.current() if rec: return rec.checkout() except AttributeError: pass agent = _ConnectionFairy(self) self._threadconns.current = weakref.ref(agent) return agent.checkout() def _return_conn(self, record): """Given a _ConnectionRecord, return it to the :class:`.Pool`. This method is called when an instrumented DBAPI connection has its ``close()`` method called. """ if self._use_threadlocal: try: del self._threadconns.current except AttributeError: pass self._do_return_conn(record) def _do_get(self): """Implementation for :meth:`get`, supplied by subclasses.""" raise NotImplementedError() def _do_return_conn(self, conn): """Implementation for :meth:`return_conn`, supplied by subclasses.""" raise NotImplementedError() def status(self): raise NotImplementedError() class _ConnectionRecord(object): finalize_callback = None def __init__(self, pool): self.__pool = pool self.connection = self.__connect() pool.dispatch.first_connect.\ for_modify(pool.dispatch).\ exec_once(self.connection, self) pool.dispatch.connect(self.connection, self) @util.memoized_property def info(self): return {} def close(self): if self.connection is not None: self.__pool._close_connection(self.connection) def invalidate(self, e=None): if e is not None: self.__pool.logger.info( "Invalidate connection %r (reason: %s:%s)", self.connection, e.__class__.__name__, e) else: self.__pool.logger.info( "Invalidate connection %r", self.connection) self.__close() self.connection = None def get_connection(self): if self.connection is None: self.connection = self.__connect() self.info.clear() if self.__pool.dispatch.connect: self.__pool.dispatch.connect(self.connection, self) elif self.__pool._recycle > -1 and \ time.time() - self.starttime > self.__pool._recycle: self.__pool.logger.info( "Connection %r exceeded timeout; recycling", self.connection) self.__close() self.connection = self.__connect() self.info.clear() if self.__pool.dispatch.connect: self.__pool.dispatch.connect(self.connection, self) return self.connection def checkin(self): self.fairy = None connection = self.connection pool = self.__pool if self.finalize_callback: self.finalize_callback(connection) del self.finalize_callback if pool.dispatch.checkin: pool.dispatch.checkin(connection, self) pool._return_conn(self) def __close(self): self.__pool._close_connection(self.connection) def __connect(self): try: self.starttime = time.time() connection = self.__pool._creator() self.__pool.logger.debug("Created new connection %r", connection) return connection except Exception as e: self.__pool.logger.debug("Error on connect(): %s", e) raise def _finalize_fairy(connection, connection_record, pool, ref, echo): _refs.discard(connection_record) if ref is not None and \ connection_record.fairy is not ref: return if connection_record and 
echo: pool.logger.debug("Connection %r being returned to pool", connection) if connection is not None: try: if pool.dispatch.reset: pool.dispatch.reset(connection, connection_record) if pool._reset_on_return is reset_rollback: pool._dialect.do_rollback(connection) elif pool._reset_on_return is reset_commit: pool._dialect.do_commit(connection) # Immediately close detached instances if connection_record is None: pool._close_connection(connection) except Exception as e: if connection_record is not None: connection_record.invalidate(e=e) if isinstance(e, (SystemExit, KeyboardInterrupt)): raise if connection_record: connection_record.checkin() _refs = set() class _ConnectionFairy(object): """Proxies a DB-API connection and provides return-on-dereference support.""" def __init__(self, pool): self._pool = pool self.__counter = 0 self._echo = _echo = pool._should_log_debug() try: rec = self._connection_record = pool._do_get() try: conn = self.connection = self._connection_record.get_connection() except: self._connection_record.checkin() raise rec.fairy = weakref.ref( self, lambda ref: _finalize_fairy and \ _finalize_fairy(conn, rec, pool, ref, _echo) ) _refs.add(rec) except: # helps with endless __getattr__ loops later on self.connection = None self._connection_record = None raise if self._echo: self._pool.logger.debug("Connection %r checked out from pool" % self.connection) @property def _logger(self): return self._pool.logger @property def is_valid(self): return self.connection is not None @util.memoized_property def info(self): """Info dictionary associated with the underlying DBAPI connection referred to by this :class:`.ConnectionFairy`, allowing user-defined data to be associated with the connection. The data here will follow along with the DBAPI connection including after it is returned to the connection pool and used again in subsequent instances of :class:`.ConnectionFairy`. """ try: return self._connection_record.info except AttributeError: raise exc.InvalidRequestError("This connection is closed") def invalidate(self, e=None): """Mark this connection as invalidated. The connection will be immediately closed. The containing ConnectionRecord will create a new connection when next used. """ if self.connection is None: raise exc.InvalidRequestError("This connection is closed") if self._connection_record is not None: self._connection_record.invalidate(e=e) self.connection = None self._close() def cursor(self, *args, **kwargs): return self.connection.cursor(*args, **kwargs) def __getattr__(self, key): return getattr(self.connection, key) def checkout(self): if self.connection is None: raise exc.InvalidRequestError("This connection is closed") self.__counter += 1 if not self._pool.dispatch.checkout or self.__counter != 1: return self # Pool listeners can trigger a reconnection on checkout attempts = 2 while attempts > 0: try: self._pool.dispatch.checkout(self.connection, self._connection_record, self) return self except exc.DisconnectionError as e: self._pool.logger.info( "Disconnection detected on checkout: %s", e) self._connection_record.invalidate(e) self.connection = self._connection_record.get_connection() attempts -= 1 self._pool.logger.info("Reconnection attempts exhausted on checkout") self.invalidate() raise exc.InvalidRequestError("This connection is closed") def detach(self): """Separate this connection from its Pool. This means that the connection will no longer be returned to the pool when closed, and will instead be literally closed. 
The containing ConnectionRecord is separated from the DB-API connection, and will create a new connection when next used. Note that any overall connection limiting constraints imposed by a Pool implementation may be violated after a detach, as the detached connection is removed from the pool's knowledge and control. """ if self._connection_record is not None: _refs.remove(self._connection_record) self._connection_record.fairy = None self._connection_record.connection = None self._pool._do_return_conn(self._connection_record) self.info = self.info.copy() self._connection_record = None def close(self): self.__counter -= 1 if self.__counter == 0: self._close() def _close(self): _finalize_fairy(self.connection, self._connection_record, self._pool, None, self._echo) self.connection = None self._connection_record = None class SingletonThreadPool(Pool): """A Pool that maintains one connection per thread. Maintains one connection per each thread, never moving a connection to a thread other than the one which it was created in. Options are the same as those of :class:`.Pool`, as well as: :param pool_size: The number of threads in which to maintain connections at once. Defaults to five. :class:`.SingletonThreadPool` is used by the SQLite dialect automatically when a memory-based database is used. See :ref:`sqlite_toplevel`. """ def __init__(self, creator, pool_size=5, **kw): kw['use_threadlocal'] = True Pool.__init__(self, creator, **kw) self._conn = threading.local() self._all_conns = set() self.size = pool_size def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, pool_size=self.size, recycle=self._recycle, echo=self.echo, logging_name=self._orig_logging_name, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, _dispatch=self.dispatch, _dialect=self._dialect) def dispose(self): """Dispose of this pool.""" for conn in self._all_conns: try: conn.close() except (SystemExit, KeyboardInterrupt): raise except: # pysqlite won't even let you close a conn from a thread # that didn't create it pass self._all_conns.clear() def _cleanup(self): while len(self._all_conns) > self.size: c = self._all_conns.pop() c.close() def status(self): return "SingletonThreadPool id:%d size: %d" % \ (id(self), len(self._all_conns)) def _do_return_conn(self, conn): pass def _do_get(self): try: c = self._conn.current() if c: return c except AttributeError: pass c = self._create_connection() self._conn.current = weakref.ref(c) self._all_conns.add(c) if len(self._all_conns) > self.size: self._cleanup() return c class DummyLock(object): def acquire(self, wait=True): return True def release(self): pass class QueuePool(Pool): """A :class:`.Pool` that imposes a limit on the number of open connections. :class:`.QueuePool` is the default pooling implementation used for all :class:`.Engine` objects, unless the SQLite dialect is in use. """ def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw): """ Construct a QueuePool. :param creator: a callable function that returns a DB-API connection object. The function will be called with parameters. :param pool_size: The size of the pool to be maintained, defaults to 5. This is the largest number of connections that will be kept persistently in the pool. Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain. ``pool_size`` can be set to 0 to indicate no size limit; to disable pooling, use a :class:`~sqlalchemy.pool.NullPool` instead. 
:param max_overflow: The maximum overflow size of the pool. When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit. When those additional connections are returned to the pool, they are disconnected and discarded. It follows then that the total number of simultaneous connections the pool will allow is pool_size + `max_overflow`, and the total number of "sleeping" connections the pool will allow is pool_size. `max_overflow` can be set to -1 to indicate no overflow limit; no limit will be placed on the total number of concurrent connections. Defaults to 10. :param timeout: The number of seconds to wait before giving up on returning a connection. Defaults to 30. :param recycle: If set to non -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1. :param echo: If True, connections being pulled and retrieved from the pool will be logged to the standard output, as well as pool sizing information. Echoing can also be achieved by enabling logging for the "sqlalchemy.pool" namespace. Defaults to False. :param use_threadlocal: If set to True, repeated calls to :meth:`connect` within the same application thread will be guaranteed to return the same connection object, if one has already been retrieved from the pool and has not been returned yet. Offers a slight performance advantage at the cost of individual transactions by default. The :meth:`unique_connection` method is provided to bypass the threadlocal behavior installed into :meth:`connect`. :param reset_on_return: Determine steps to take on connections as they are returned to the pool. reset_on_return can have any of these values: * 'rollback' - call rollback() on the connection, to release locks and transaction resources. This is the default value. The vast majority of use cases should leave this value set. * True - same as 'rollback', this is here for backwards compatibility. * 'commit' - call commit() on the connection, to release locks and transaction resources. A commit here may be desirable for databases that cache query plans if a commit is emitted, such as Microsoft SQL Server. However, this value is more dangerous than 'rollback' because any data changes present on the transaction are committed unconditionally. * None - don't do anything on the connection. This setting should only be made on a database that has no transaction support at all, namely MySQL MyISAM. By not doing anything, performance can be improved. This setting should **never be selected** for a database that supports transactions, as it will lead to deadlocks and stale state. * False - same as None, this is here for backwards compatibility. .. versionchanged:: 0.7.6 ``reset_on_return`` accepts values. :param listeners: A list of :class:`~sqlalchemy.interfaces.PoolListener`-like objects or dictionaries of callables that receive events when DB-API connections are created, checked out and checked in to the pool. 
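
        Example (illustrative sketch added for documentation; ``sqlite3`` is
        used purely as a stand-in DB-API module)::

            import sqlite3
            from sqlalchemy.pool import QueuePool

            pool = QueuePool(lambda: sqlite3.connect(":memory:"),
                             pool_size=5, max_overflow=10, timeout=30)
            conn = pool.connect()   # checked out from the pool
            conn.close()            # returned to the pool, not closed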
""" Pool.__init__(self, creator, **kw) self._pool = sqla_queue.Queue(pool_size) self._overflow = 0 - pool_size self._max_overflow = max_overflow self._timeout = timeout self._overflow_lock = threading.Lock() if self._max_overflow > -1 \ else DummyLock() def _do_return_conn(self, conn): try: self._pool.put(conn, False) except sqla_queue.Full: conn.close() self._overflow_lock.acquire() try: self._overflow -= 1 finally: self._overflow_lock.release() def _do_get(self): try: wait = self._max_overflow > -1 and \ self._overflow >= self._max_overflow return self._pool.get(wait, self._timeout) except sqla_queue.SAAbort as aborted: return aborted.context._do_get() except sqla_queue.Empty: if self._max_overflow > -1 and \ self._overflow >= self._max_overflow: if not wait: return self._do_get() else: raise exc.TimeoutError( "QueuePool limit of size %d overflow %d reached, " "connection timed out, timeout %d" % (self.size(), self.overflow(), self._timeout)) self._overflow_lock.acquire() try: if self._max_overflow > -1 and \ self._overflow >= self._max_overflow: return self._do_get() else: con = self._create_connection() self._overflow += 1 return con finally: self._overflow_lock.release() def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, pool_size=self._pool.maxsize, max_overflow=self._max_overflow, timeout=self._timeout, recycle=self._recycle, echo=self.echo, logging_name=self._orig_logging_name, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, _dispatch=self.dispatch, _dialect=self._dialect) def dispose(self): while True: try: conn = self._pool.get(False) conn.close() except sqla_queue.Empty: break self._overflow = 0 - self.size() self.logger.info("Pool disposed. %s", self.status()) def _replace(self): self.dispose() np = self.recreate() self._pool.abort(np) return np def status(self): return "Pool size: %d Connections in pool: %d "\ "Current Overflow: %d Current Checked out "\ "connections: %d" % (self.size(), self.checkedin(), self.overflow(), self.checkedout()) def size(self): return self._pool.maxsize def checkedin(self): return self._pool.qsize() def overflow(self): return self._overflow def checkedout(self): return self._pool.maxsize - self._pool.qsize() + self._overflow class NullPool(Pool): """A Pool which does not pool connections. Instead it literally opens and closes the underlying DB-API connection per each connection open/close. Reconnect-related functions such as ``recycle`` and connection invalidation are not supported by this Pool implementation, since no connections are held persistently. .. versionchanged:: 0.7 :class:`.NullPool` is used by the SQlite dialect automatically when a file-based database is used. See :ref:`sqlite_toplevel`. """ def status(self): return "NullPool" def _do_return_conn(self, conn): conn.close() def _do_get(self): return self._create_connection() def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, recycle=self._recycle, echo=self.echo, logging_name=self._orig_logging_name, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, _dispatch=self.dispatch, _dialect=self._dialect) def dispose(self): pass class StaticPool(Pool): """A Pool of exactly one connection, used for all requests. Reconnect-related functions such as ``recycle`` and connection invalidation (which is also used to support auto-reconnect) are not currently supported by this Pool implementation but may be implemented in a future release. 
""" @memoized_property def _conn(self): return self._creator() @memoized_property def connection(self): return _ConnectionRecord(self) def status(self): return "StaticPool" def dispose(self): if '_conn' in self.__dict__: self._conn.close() self._conn = None def recreate(self): self.logger.info("Pool recreating") return self.__class__(creator=self._creator, recycle=self._recycle, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, echo=self.echo, logging_name=self._orig_logging_name, _dispatch=self.dispatch, _dialect=self._dialect) def _create_connection(self): return self._conn def _do_return_conn(self, conn): pass def _do_get(self): return self.connection class AssertionPool(Pool): """A :class:`.Pool` that allows at most one checked out connection at any given time. This will raise an exception if more than one connection is checked out at a time. Useful for debugging code that is using more connections than desired. .. versionchanged:: 0.7 :class:`.AssertionPool` also logs a traceback of where the original connection was checked out, and reports this in the assertion error raised. """ def __init__(self, *args, **kw): self._conn = None self._checked_out = False self._store_traceback = kw.pop('store_traceback', True) self._checkout_traceback = None Pool.__init__(self, *args, **kw) def status(self): return "AssertionPool" def _do_return_conn(self, conn): if not self._checked_out: raise AssertionError("connection is not checked out") self._checked_out = False assert conn is self._conn def dispose(self): self._checked_out = False if self._conn: self._conn.close() def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, echo=self.echo, logging_name=self._orig_logging_name, _dispatch=self.dispatch, _dialect=self._dialect) def _do_get(self): if self._checked_out: if self._checkout_traceback: suffix = ' at:\n%s' % ''.join( chop_traceback(self._checkout_traceback)) else: suffix = '' raise AssertionError("connection is already checked out" + suffix) if not self._conn: self._conn = self._create_connection() self._checked_out = True if self._store_traceback: self._checkout_traceback = traceback.format_stack() return self._conn class _DBProxy(object): """Layers connection pooling behavior on top of a standard DB-API module. Proxies a DB-API 2.0 connect() call to a connection pool keyed to the specific connect parameters. Other functions and attributes are delegated to the underlying DB-API module. """ def __init__(self, module, poolclass=QueuePool, **kw): """Initializes a new proxy. module a DB-API 2.0 module poolclass a Pool class, defaulting to QueuePool Other parameters are sent to the Pool object's constructor. """ self.module = module self.kw = kw self.poolclass = poolclass self.pools = {} self._create_pool_mutex = threading.Lock() def close(self): for key in list(self.pools.keys()): del self.pools[key] def __del__(self): self.close() def __getattr__(self, key): return getattr(self.module, key) def get_pool(self, *args, **kw): key = self._serialize(*args, **kw) try: return self.pools[key] except KeyError: self._create_pool_mutex.acquire() try: if key not in self.pools: kw.pop('sa_pool_key', None) pool = self.poolclass(lambda: self.module.connect(*args, **kw), **self.kw) self.pools[key] = pool return pool else: return self.pools[key] finally: self._create_pool_mutex.release() def connect(self, *args, **kw): """Activate a connection to the database. Connect to the database using this DBProxy's module and the given connect arguments. 
If the arguments match an existing pool, the connection will be returned from the pool's current thread-local connection instance, or if there is no thread-local connection instance it will be checked out from the set of pooled connections. If the pool has no available connections and allows new connections to be created, a new database connection will be made. """ return self.get_pool(*args, **kw).connect() def dispose(self, *args, **kw): """Dispose the pool referenced by the given connect arguments.""" key = self._serialize(*args, **kw) try: del self.pools[key] except KeyError: pass def _serialize(self, *args, **kw): if "sa_pool_key" in kw: return kw['sa_pool_key'] return tuple( list(args) + [(k, kw[k]) for k in sorted(kw)] )
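# Usage sketch (illustration only): wrapping a DB-API ``creator`` in a
# QueuePool and exercising the overflow accounting described above.  The
# sqlite3 driver and the pool sizes below are arbitrary stand-ins, not a
# recommendation.
if __name__ == "__main__":
    import sqlite3

    def _demo_creator():
        # Any zero-argument callable returning a DB-API connection works here.
        return sqlite3.connect(":memory:")

    demo_pool = QueuePool(_demo_creator, pool_size=2, max_overflow=1, timeout=5)

    c1 = demo_pool.connect()   # checked out of the queue
    c2 = demo_pool.connect()   # second pooled connection
    c3 = demo_pool.connect()   # overflow: at most pool_size + max_overflow = 3
    print(demo_pool.status())

    c3.close()                 # overflow connections are discarded on return
    c1.close()                 # pooled connections go back to the queue
    c2.close()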
"""Client interface for MongoDB.""" import itertools import os import shutil import subprocess import sys import time from collections import defaultdict from copy import deepcopy from pathlib import Path from tempfile import TemporaryDirectory from ruamel.yaml import YAML # # setup mongo # try: import pymongo MONGO_AVAILABLE = True except ImportError: print( "pymongo not found. Please install it following the instructions " "https://pymongo.readthedocs.io/en/stable/installation.html" ) MONGO_AVAILABLE = False from pymongo.collection import Collection from regolith.tools import dbpathname, fallback from regolith import fsclient if not MONGO_AVAILABLE: ON_PYMONGO_V2 = ON_PYMONGO_V3 = False elif pymongo.version.split(".")[0] == "2": ON_PYMONGO_V2 = True ON_PYMONGO_V3 = False else: ON_PYMONGO_V2 = False ON_PYMONGO_V3 = True def import_jsons(dbpath: str, dbname: str, host: str = None, uri: str = None) -> None: """Import the json files to mongo db. Each json file will be a collection in the database. The _id will be the same as it is in the json file. Parameters ---------- dbpath : str The path to the db folder. dbname : str The name of the database in mongo. host : str The hostname or IP address or Unix domain socket path of a single mongod or mongos instance to connect to, or a mongodb URI, or a list of hostnames / mongodb URIs. uri : str Specify a resolvable URI connection string (enclose in quotes) to connect to the MongoDB deployment. """ for json_path in Path(dbpath).glob("*.json"): cmd = ["mongoimport"] if host is not None: cmd += ['--host', host, "--db", dbname] if uri is not None: cmd += ['--uri', uri] cmd += ["--collection", json_path.stem, "--file", str(json_path)] subprocess.check_call(cmd) return def import_yamls(dbpath: str, dbname: str, host: str = None, uri: str = None) -> None: """Import the yaml files to mongo db. Each yaml file will be a collection in the database. The _id will be the id_key for each doc in the yaml file. Parameters ---------- dbpath : str The path to the db folder. dbname : str The name of the database in mongo. host : str The hostname or IP address or Unix domain socket path of a single mongod or mongos instance to connect to, or a mongodb URI, or a list of hostnames / mongodb URIs. uri : str Specify a resolvable URI connection string (enclose in quotes) to connect to the MongoDB deployment. """ yaml_files = itertools.chain(Path(dbpath).glob('*.yaml'), Path(dbpath).glob('*.yml')) with TemporaryDirectory() as tempd: for yaml_file in yaml_files: json_file = Path(tempd).joinpath(yaml_file.with_suffix('.json').name) loader = YAML(typ='safe') loader.constructor.yaml_constructors[u'tag:yaml.org,2002:timestamp'] = \ loader.constructor.yaml_constructors[u'tag:yaml.org,2002:str'] fsclient.yaml_to_json(str(yaml_file), str(json_file), loader=loader) import_jsons(tempd, dbname, host=host, uri=uri) return def load_mongo_col(col: Collection) -> dict: """Load the pymongo collection to a dictionary. In the dictionary. The key will be the '_id' and in each value which is a dictionary there will also be a key '_id' so that the structure will be the same as the filesystem collection. Parameters ---------- col : Collection The mongodb collection. Returns ------- dct : dict A dictionary with all the info in the collection. 
""" return { doc['_id']: doc for doc in col.find({}) } @fallback(ON_PYMONGO_V2, None) class InsertOneProxy(object): def __init__(self, inserted_id, acknowledged): self.inserted_id = inserted_id self.acknowledged = acknowledged class MongoClient: """A client backed by MongoDB. The mongodb server will be automatically opened when the client is initiated. Attributes ---------- rc : RunControl The RunControl. It may include the 'mongohost' attribute to initiate the client. client : MongoClient The mongo client. It is initiate from the 'mongohost' attribute if it exists in rc. Otherwise, it will be initiated from the 'localhost'. proc : Popen The Popen of 'mongod --dpath <mongodbpath>'. The 'mongodbpath' is from rc. """ def __init__(self, rc): if not MONGO_AVAILABLE: raise RuntimeError( "MongoDB is not available on the current system." ) self.rc = rc self.client = None self.proc = None self.dbs = defaultdict(lambda: defaultdict(dict)) self.chained_db = dict() self.closed = True # actually startup mongo self.open() def _preclean(self): mongodbpath = self.rc.mongodbpath if os.path.isdir(mongodbpath): shutil.rmtree(mongodbpath) os.makedirs(mongodbpath) def _startserver(self): mongodbpath = self.rc.mongodbpath self.proc = subprocess.Popen( ["mongod", "--fork", "--syslog", "--dbpath", mongodbpath], universal_newlines=True ) print("mongod pid: {0}".format(self.proc.pid), file=sys.stderr) def is_alive(self): """Returns whether or not the client is alive and availabe to send/recieve data. """ if self.client is None: return False elif ON_PYMONGO_V2: return self.client.alive() elif ON_PYMONGO_V3: alive = False if self.rc.local is False: from pymongo.errors import ConnectionFailure try: # The ismaster command is cheap and does not require auth. self.client.admin.command('ismaster') alive = True except ConnectionFailure: print("Server not available") alive = False else: cmd = ["mongostat", "--host", "localhost", "-n", "1"] try: subprocess.check_call(cmd) alive = True except subprocess.CalledProcessError: alive = False return alive else: return False def open(self): """Opens the database client""" rc = self.rc if hasattr(rc, 'host'): host = getattr(rc, 'host') else: dbs = getattr(rc, 'databases') host = dbs[0]['url'] self.client = pymongo.MongoClient(host) if not self.is_alive(): # we need to wait for the server to startup self._preclean() self._startserver() time.sleep(0.1) self.closed = False def load_database(self, db: dict): """Load the database information from mongo database. It populate the 'dbs' attribute with a dictionary like {database: {collection: docs_dict}}. Parameters ---------- db : dict The dictionary of data base information, such as 'name'. """ dbs: dict = self.dbs client: pymongo.MongoClient = self.client mongodb = client[db['name']] for colname in mongodb.list_collection_names(): col = mongodb[colname] dbs[db['name']][colname] = load_mongo_col(col) return def import_database(self, db: dict): """Import the database from filesystem to the mongo backend. Parameters ---------- db : dict The dictionary of data base information, such as 'name'. 
""" host = getattr(self.rc, 'host', None) uri = db.get('dst_url', None) dbpath = dbpathname(db, self.rc) dbname = db['name'] import_jsons(dbpath, dbname, host=host, uri=uri) import_yamls(dbpath, dbname, host=host, uri=uri) return def dump_database(self, db): """Dumps a database dict via mongoexport.""" dbpath = dbpathname(db, self.rc) os.makedirs(dbpath, exist_ok=True) to_add = [] colls = self.client[db["name"]].collection_names( include_system_collections=False ) for collection in colls: f = os.path.join(dbpath, collection + ".json") cmd = [ "mongoexport", "--db", db["name"], "--collection", collection, "--out", f, ] subprocess.check_call(cmd) to_add.append(os.path.join(db["path"], collection + ".json")) return to_add def close(self): """Closes the database connection.""" self.closed = True return def keys(self): return self.client.database_names() def __getitem__(self, key): return self.client[key] def collection_names(self, dbname): """Returns the collection names for the database name.""" return self.client[dbname].collection_names() def all_documents(self, collname, copy=True): """Returns an iterable over all documents in a collection.""" if copy: return deepcopy(self.chained_db.get(collname, {})).values() return self.chained_db.get(collname, {}).values() def insert_one(self, dbname, collname, doc): """Inserts one document to a database/collection.""" coll = self.client[dbname][collname] if ON_PYMONGO_V2: i = coll.insert(doc) return InsertOneProxy(i, True) else: return coll.insert_one(doc) def insert_many(self, dbname, collname, docs): """Inserts many documents into a database/collection.""" coll = self.client[dbname][collname] if ON_PYMONGO_V2: return coll.insert(docs) else: return coll.insert_many(docs) def delete_one(self, dbname, collname, doc): """Removes a single document from a collection""" coll = self.client[dbname][collname] if ON_PYMONGO_V2: return coll.remove(doc, multi=False) else: return coll.delete_one(doc) def update_one(self, dbname, collname, filter, update, **kwargs): """Updates one document.""" coll = self.client[dbname][collname] if ON_PYMONGO_V2: doc = coll.find_one(filter) if doc is None: if not kwargs.get("upsert", False): raise RuntimeError( "could not update non-existing document" ) newdoc = dict(filter) newdoc.update(update["$set"]) return self.insert_one(dbname, collname, newdoc) return coll.update(doc, update, **kwargs) else: return coll.find_one_and_update(filter, update, **kwargs)
from datetime import date from unittest import mock from django.contrib.auth import ( BACKEND_SESSION_KEY, SESSION_KEY, authenticate, get_user, signals, ) from django.contrib.auth.backends import ModelBackend from django.contrib.auth.hashers import MD5PasswordHasher from django.contrib.auth.models import AnonymousUser, Group, Permission, User from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.http import HttpRequest from django.test import ( SimpleTestCase, TestCase, modify_settings, override_settings, ) from .models import ( CustomPermissionsUser, CustomUser, CustomUserWithoutIsActiveField, ExtensionUser, UUIDUser, ) class CountingMD5PasswordHasher(MD5PasswordHasher): """Hasher that counts how many times it computes a hash.""" calls = 0 def encode(self, *args, **kwargs): type(self).calls += 1 return super().encode(*args, **kwargs) class BaseModelBackendTest: """ A base class for tests that need to validate the ModelBackend with different User models. Subclasses should define a class level UserModel attribute, and a create_users() method to construct two users for test purposes. """ backend = 'django.contrib.auth.backends.ModelBackend' def setUp(self): self.patched_settings = modify_settings( AUTHENTICATION_BACKENDS={'append': self.backend}, ) self.patched_settings.enable() self.create_users() def tearDown(self): self.patched_settings.disable() # The custom_perms test messes with ContentTypes, which will # be cached; flush the cache to ensure there are no side effects # Refs #14975, #14925 ContentType.objects.clear_cache() def test_has_perm(self): user = self.UserModel._default_manager.get(pk=self.user.pk) self.assertIs(user.has_perm('auth.test'), False) user.is_staff = True user.save() self.assertIs(user.has_perm('auth.test'), False) user.is_superuser = True user.save() self.assertIs(user.has_perm('auth.test'), True) user.is_staff = True user.is_superuser = True user.is_active = False user.save() self.assertIs(user.has_perm('auth.test'), False) def test_custom_perms(self): user = self.UserModel._default_manager.get(pk=self.user.pk) content_type = ContentType.objects.get_for_model(Group) perm = Permission.objects.create(name='test', content_type=content_type, codename='test') user.user_permissions.add(perm) # reloading user to purge the _perm_cache user = self.UserModel._default_manager.get(pk=self.user.pk) self.assertEqual(user.get_all_permissions(), {'auth.test'}) self.assertEqual(user.get_group_permissions(), set()) self.assertIs(user.has_module_perms('Group'), False) self.assertIs(user.has_module_perms('auth'), True) perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2') user.user_permissions.add(perm) perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3') user.user_permissions.add(perm) user = self.UserModel._default_manager.get(pk=self.user.pk) self.assertEqual(user.get_all_permissions(), {'auth.test2', 'auth.test', 'auth.test3'}) self.assertIs(user.has_perm('test'), False) self.assertIs(user.has_perm('auth.test'), True) self.assertIs(user.has_perms(['auth.test2', 'auth.test3']), True) perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group') group = Group.objects.create(name='test_group') group.permissions.add(perm) user.groups.add(group) user = self.UserModel._default_manager.get(pk=self.user.pk) exp = {'auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'} 
self.assertEqual(user.get_all_permissions(), exp) self.assertEqual(user.get_group_permissions(), {'auth.test_group'}) self.assertIs(user.has_perms(['auth.test3', 'auth.test_group']), True) user = AnonymousUser() self.assertIs(user.has_perm('test'), False) self.assertIs(user.has_perms(['auth.test2', 'auth.test3']), False) def test_has_no_object_perm(self): """Regressiontest for #12462""" user = self.UserModel._default_manager.get(pk=self.user.pk) content_type = ContentType.objects.get_for_model(Group) perm = Permission.objects.create(name='test', content_type=content_type, codename='test') user.user_permissions.add(perm) self.assertIs(user.has_perm('auth.test', 'object'), False) self.assertEqual(user.get_all_permissions('object'), set()) self.assertIs(user.has_perm('auth.test'), True) self.assertEqual(user.get_all_permissions(), {'auth.test'}) def test_anonymous_has_no_permissions(self): """ #17903 -- Anonymous users shouldn't have permissions in ModelBackend.get_(all|user|group)_permissions(). """ backend = ModelBackend() user = self.UserModel._default_manager.get(pk=self.user.pk) content_type = ContentType.objects.get_for_model(Group) user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user') group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group') user.user_permissions.add(user_perm) group = Group.objects.create(name='test_group') user.groups.add(group) group.permissions.add(group_perm) self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'}) self.assertEqual(backend.get_user_permissions(user), {'auth.test_user'}) self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'}) with mock.patch.object(self.UserModel, 'is_anonymous', True): self.assertEqual(backend.get_all_permissions(user), set()) self.assertEqual(backend.get_user_permissions(user), set()) self.assertEqual(backend.get_group_permissions(user), set()) def test_inactive_has_no_permissions(self): """ #17903 -- Inactive users shouldn't have permissions in ModelBackend.get_(all|user|group)_permissions(). """ backend = ModelBackend() user = self.UserModel._default_manager.get(pk=self.user.pk) content_type = ContentType.objects.get_for_model(Group) user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user') group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group') user.user_permissions.add(user_perm) group = Group.objects.create(name='test_group') user.groups.add(group) group.permissions.add(group_perm) self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'}) self.assertEqual(backend.get_user_permissions(user), {'auth.test_user'}) self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'}) user.is_active = False user.save() self.assertEqual(backend.get_all_permissions(user), set()) self.assertEqual(backend.get_user_permissions(user), set()) self.assertEqual(backend.get_group_permissions(user), set()) def test_get_all_superuser_permissions(self): """A superuser has all permissions. Refs #14795.""" user = self.UserModel._default_manager.get(pk=self.superuser.pk) self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all())) @override_settings(PASSWORD_HASHERS=['auth_tests.test_auth_backends.CountingMD5PasswordHasher']) def test_authentication_timing(self): """Hasher is run once regardless of whether the user exists. 
Refs #20760.""" # Re-set the password, because this tests overrides PASSWORD_HASHERS self.user.set_password('test') self.user.save() CountingMD5PasswordHasher.calls = 0 username = getattr(self.user, self.UserModel.USERNAME_FIELD) authenticate(username=username, password='test') self.assertEqual(CountingMD5PasswordHasher.calls, 1) CountingMD5PasswordHasher.calls = 0 authenticate(username='no_such_user', password='test') self.assertEqual(CountingMD5PasswordHasher.calls, 1) class ModelBackendTest(BaseModelBackendTest, TestCase): """ Tests for the ModelBackend using the default User model. """ UserModel = User user_credentials = {'username': 'test', 'password': 'test'} def create_users(self): self.user = User.objects.create_user(email='test@example.com', **self.user_credentials) self.superuser = User.objects.create_superuser( username='test2', email='test2@example.com', password='test', ) def test_authenticate_inactive(self): """ An inactive user can't authenticate. """ self.assertEqual(authenticate(**self.user_credentials), self.user) self.user.is_active = False self.user.save() self.assertIsNone(authenticate(**self.user_credentials)) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithoutIsActiveField') def test_authenticate_user_without_is_active_field(self): """ A custom user without an `is_active` field is allowed to authenticate. """ user = CustomUserWithoutIsActiveField.objects._create_user( username='test', email='test@example.com', password='test', ) self.assertEqual(authenticate(username='test', password='test'), user) @override_settings(AUTH_USER_MODEL='auth_tests.ExtensionUser') class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase): """ Tests for the ModelBackend using the custom ExtensionUser model. This isn't a perfect test, because both the User and ExtensionUser are synchronized to the database, which wouldn't ordinary happen in production. As a result, it doesn't catch errors caused by the non- existence of the User table. The specific problem is queries on .filter(groups__user) et al, which makes an implicit assumption that the user model is called 'User'. In production, the auth.User table won't exist, so the requested join won't exist either; in testing, the auth.User *does* exist, and so does the join. However, the join table won't contain any useful data; for testing, we check that the data we expect actually does exist. """ UserModel = ExtensionUser def create_users(self): self.user = ExtensionUser._default_manager.create_user( username='test', email='test@example.com', password='test', date_of_birth=date(2006, 4, 25) ) self.superuser = ExtensionUser._default_manager.create_superuser( username='test2', email='test2@example.com', password='test', date_of_birth=date(1976, 11, 8) ) @override_settings(AUTH_USER_MODEL='auth_tests.CustomPermissionsUser') class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase): """ Tests for the ModelBackend using the CustomPermissionsUser model. As with the ExtensionUser test, this isn't a perfect test, because both the User and CustomPermissionsUser are synchronized to the database, which wouldn't ordinary happen in production. 
""" UserModel = CustomPermissionsUser def create_users(self): self.user = CustomPermissionsUser._default_manager.create_user( email='test@example.com', password='test', date_of_birth=date(2006, 4, 25) ) self.superuser = CustomPermissionsUser._default_manager.create_superuser( email='test2@example.com', password='test', date_of_birth=date(1976, 11, 8) ) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser') class CustomUserModelBackendAuthenticateTest(TestCase): """ The model backend can accept a credentials kwarg labeled with custom user model's USERNAME_FIELD. """ def test_authenticate(self): test_user = CustomUser._default_manager.create_user( email='test@example.com', password='test', date_of_birth=date(2006, 4, 25) ) authenticated_user = authenticate(email='test@example.com', password='test') self.assertEqual(test_user, authenticated_user) @override_settings(AUTH_USER_MODEL='auth_tests.UUIDUser') class UUIDUserTests(TestCase): def test_login(self): """ A custom user with a UUID primary key should be able to login. """ user = UUIDUser.objects.create_user(username='uuid', password='test') self.assertTrue(self.client.login(username='uuid', password='test')) self.assertEqual(UUIDUser.objects.get(pk=self.client.session[SESSION_KEY]), user) class TestObj: pass class SimpleRowlevelBackend: def has_perm(self, user, perm, obj=None): if not obj: return # We only support row level perms if isinstance(obj, TestObj): if user.username == 'test2': return True elif user.is_anonymous and perm == 'anon': return True elif not user.is_active and perm == 'inactive': return True return False def has_module_perms(self, user, app_label): if not user.is_anonymous and not user.is_active: return False return app_label == "app1" def get_all_permissions(self, user, obj=None): if not obj: return [] # We only support row level perms if not isinstance(obj, TestObj): return ['none'] if user.is_anonymous: return ['anon'] if user.username == 'test2': return ['simple', 'advanced'] else: return ['simple'] def get_group_permissions(self, user, obj=None): if not obj: return # We only support row level perms if not isinstance(obj, TestObj): return ['none'] if 'test_group' in [group.name for group in user.groups.all()]: return ['group_perm'] else: return ['none'] @modify_settings(AUTHENTICATION_BACKENDS={ 'append': 'auth_tests.test_auth_backends.SimpleRowlevelBackend', }) class RowlevelBackendTest(TestCase): """ Tests for auth backend that supports object level permissions """ def setUp(self): self.user1 = User.objects.create_user('test', 'test@example.com', 'test') self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test') self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test') def tearDown(self): # The get_group_permissions test messes with ContentTypes, which will # be cached; flush the cache to ensure there are no side effects # Refs #14975, #14925 ContentType.objects.clear_cache() def test_has_perm(self): self.assertIs(self.user1.has_perm('perm', TestObj()), False) self.assertIs(self.user2.has_perm('perm', TestObj()), True) self.assertIs(self.user2.has_perm('perm'), False) self.assertIs(self.user2.has_perms(['simple', 'advanced'], TestObj()), True) self.assertIs(self.user3.has_perm('perm', TestObj()), False) self.assertIs(self.user3.has_perm('anon', TestObj()), False) self.assertIs(self.user3.has_perms(['simple', 'advanced'], TestObj()), False) def test_get_all_permissions(self): self.assertEqual(self.user1.get_all_permissions(TestObj()), {'simple'}) 
self.assertEqual(self.user2.get_all_permissions(TestObj()), {'simple', 'advanced'}) self.assertEqual(self.user2.get_all_permissions(), set()) def test_get_group_permissions(self): group = Group.objects.create(name='test_group') self.user3.groups.add(group) self.assertEqual(self.user3.get_group_permissions(TestObj()), {'group_perm'}) @override_settings( AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'], ) class AnonymousUserBackendTest(SimpleTestCase): """ Tests for AnonymousUser delegating to backend. """ def setUp(self): self.user1 = AnonymousUser() def test_has_perm(self): self.assertIs(self.user1.has_perm('perm', TestObj()), False) self.assertIs(self.user1.has_perm('anon', TestObj()), True) def test_has_perms(self): self.assertIs(self.user1.has_perms(['anon'], TestObj()), True) self.assertIs(self.user1.has_perms(['anon', 'perm'], TestObj()), False) def test_has_module_perms(self): self.assertIs(self.user1.has_module_perms("app1"), True) self.assertIs(self.user1.has_module_perms("app2"), False) def test_get_all_permissions(self): self.assertEqual(self.user1.get_all_permissions(TestObj()), {'anon'}) @override_settings(AUTHENTICATION_BACKENDS=[]) class NoBackendsTest(TestCase): """ An appropriate error is raised if no auth backends are provided. """ def setUp(self): self.user = User.objects.create_user('test', 'test@example.com', 'test') def test_raises_exception(self): msg = ( 'No authentication backends have been defined. ' 'Does AUTHENTICATION_BACKENDS contain anything?' ) with self.assertRaisesMessage(ImproperlyConfigured, msg): self.user.has_perm(('perm', TestObj())) @override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend']) class InActiveUserBackendTest(TestCase): """ Tests for an inactive user """ def setUp(self): self.user1 = User.objects.create_user('test', 'test@example.com', 'test') self.user1.is_active = False self.user1.save() def test_has_perm(self): self.assertIs(self.user1.has_perm('perm', TestObj()), False) self.assertIs(self.user1.has_perm('inactive', TestObj()), True) def test_has_module_perms(self): self.assertIs(self.user1.has_module_perms("app1"), False) self.assertIs(self.user1.has_module_perms("app2"), False) class PermissionDeniedBackend: """ Always raises PermissionDenied in `authenticate`, `has_perm` and `has_module_perms`. """ def authenticate(self, request, username=None, password=None): raise PermissionDenied def has_perm(self, user_obj, perm, obj=None): raise PermissionDenied def has_module_perms(self, user_obj, app_label): raise PermissionDenied class PermissionDeniedBackendTest(TestCase): """ Other backends are not checked once a backend raises PermissionDenied """ backend = 'auth_tests.test_auth_backends.PermissionDeniedBackend' def setUp(self): self.user1 = User.objects.create_user('test', 'test@example.com', 'test') self.user_login_failed = [] signals.user_login_failed.connect(self.user_login_failed_listener) def tearDown(self): signals.user_login_failed.disconnect(self.user_login_failed_listener) def user_login_failed_listener(self, sender, credentials, **kwargs): self.user_login_failed.append(credentials) @modify_settings(AUTHENTICATION_BACKENDS={'prepend': backend}) def test_permission_denied(self): "user is not authenticated after a backend raises permission denied #2550" self.assertIsNone(authenticate(username='test', password='test')) # user_login_failed signal is sent. 
self.assertEqual(self.user_login_failed, [{'password': '********************', 'username': 'test'}]) @modify_settings(AUTHENTICATION_BACKENDS={'append': backend}) def test_authenticates(self): self.assertEqual(authenticate(username='test', password='test'), self.user1) @modify_settings(AUTHENTICATION_BACKENDS={'prepend': backend}) def test_has_perm_denied(self): content_type = ContentType.objects.get_for_model(Group) perm = Permission.objects.create(name='test', content_type=content_type, codename='test') self.user1.user_permissions.add(perm) self.assertIs(self.user1.has_perm('auth.test'), False) self.assertIs(self.user1.has_module_perms('auth'), False) @modify_settings(AUTHENTICATION_BACKENDS={'append': backend}) def test_has_perm(self): content_type = ContentType.objects.get_for_model(Group) perm = Permission.objects.create(name='test', content_type=content_type, codename='test') self.user1.user_permissions.add(perm) self.assertIs(self.user1.has_perm('auth.test'), True) self.assertIs(self.user1.has_module_perms('auth'), True) class NewModelBackend(ModelBackend): pass class ChangedBackendSettingsTest(TestCase): """ Tests for changes in the settings.AUTHENTICATION_BACKENDS """ backend = 'auth_tests.test_auth_backends.NewModelBackend' TEST_USERNAME = 'test_user' TEST_PASSWORD = 'test_password' TEST_EMAIL = 'test@example.com' def setUp(self): User.objects.create_user(self.TEST_USERNAME, self.TEST_EMAIL, self.TEST_PASSWORD) @override_settings(AUTHENTICATION_BACKENDS=[backend]) def test_changed_backend_settings(self): """ Removing a backend configured in AUTHENTICATION_BACKENDS makes already logged-in users disconnect. """ # Get a session for the test user self.assertTrue(self.client.login( username=self.TEST_USERNAME, password=self.TEST_PASSWORD) ) # Prepare a request object request = HttpRequest() request.session = self.client.session # Remove NewModelBackend with self.settings(AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend']): # Get the user from the request user = get_user(request) # Assert that the user retrieval is successful and the user is # anonymous as the backend is not longer available. self.assertIsNotNone(user) self.assertTrue(user.is_anonymous) class TypeErrorBackend: """ Always raises TypeError. """ def authenticate(self, request, username=None, password=None): raise TypeError class SkippedBackend: def authenticate(self): # Doesn't accept any credentials so is skipped by authenticate(). pass class AuthenticateTests(TestCase): def setUp(self): self.user1 = User.objects.create_user('test', 'test@example.com', 'test') @override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.TypeErrorBackend']) def test_type_error_raised(self): """A TypeError within a backend is propagated properly (#18171).""" with self.assertRaises(TypeError): authenticate(username='test', password='test') @override_settings(AUTHENTICATION_BACKENDS=( 'auth_tests.test_auth_backends.SkippedBackend', 'django.contrib.auth.backends.ModelBackend', )) def test_skips_backends_without_arguments(self): """ A backend (SkippedBackend) is ignored if it doesn't accept the credentials as arguments. """ self.assertEqual(authenticate(username='test', password='test'), self.user1) class ImproperlyConfiguredUserModelTest(TestCase): """ An exception from within get_user_model() is propagated and doesn't raise an UnboundLocalError (#21439). 
""" def setUp(self): self.user1 = User.objects.create_user('test', 'test@example.com', 'test') self.client.login(username='test', password='test') @override_settings(AUTH_USER_MODEL='thismodel.doesntexist') def test_does_not_shadow_exception(self): # Prepare a request object request = HttpRequest() request.session = self.client.session msg = ( "AUTH_USER_MODEL refers to model 'thismodel.doesntexist' " "that has not been installed" ) with self.assertRaisesMessage(ImproperlyConfigured, msg): get_user(request) class ImportedModelBackend(ModelBackend): pass class CustomModelBackend(ModelBackend): pass class OtherModelBackend(ModelBackend): pass class ImportedBackendTests(TestCase): """ #23925 - The backend path added to the session should be the same as the one defined in AUTHENTICATION_BACKENDS setting. """ backend = 'auth_tests.backend_alias.ImportedModelBackend' @override_settings(AUTHENTICATION_BACKENDS=[backend]) def test_backend_path(self): username = 'username' password = 'password' User.objects.create_user(username, 'email', password) self.assertTrue(self.client.login(username=username, password=password)) request = HttpRequest() request.session = self.client.session self.assertEqual(request.session[BACKEND_SESSION_KEY], self.backend) class SelectingBackendTests(TestCase): backend = 'auth_tests.test_auth_backends.CustomModelBackend' other_backend = 'auth_tests.test_auth_backends.OtherModelBackend' username = 'username' password = 'password' def assertBackendInSession(self, backend): request = HttpRequest() request.session = self.client.session self.assertEqual(request.session[BACKEND_SESSION_KEY], backend) @override_settings(AUTHENTICATION_BACKENDS=[backend]) def test_backend_path_login_without_authenticate_single_backend(self): user = User.objects.create_user(self.username, 'email', self.password) self.client._login(user) self.assertBackendInSession(self.backend) @override_settings(AUTHENTICATION_BACKENDS=[backend, other_backend]) def test_backend_path_login_without_authenticate_multiple_backends(self): user = User.objects.create_user(self.username, 'email', self.password) expected_message = ( 'You have multiple authentication backends configured and ' 'therefore must provide the `backend` argument or set the ' '`backend` attribute on the user.' ) with self.assertRaisesMessage(ValueError, expected_message): self.client._login(user) @override_settings(AUTHENTICATION_BACKENDS=[backend, other_backend]) def test_backend_path_login_with_explicit_backends(self): user = User.objects.create_user(self.username, 'email', self.password) self.client._login(user, self.other_backend) self.assertBackendInSession(self.other_backend) @override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']) class AllowAllUsersModelBackendTest(TestCase): """ Inactive users may authenticate with the AllowAllUsersModelBackend. """ user_credentials = {'username': 'test', 'password': 'test'} @classmethod def setUpTestData(cls): cls.user = User.objects.create_user( email='test@example.com', is_active=False, **cls.user_credentials ) def test_authenticate(self): self.assertFalse(self.user.is_active) self.assertEqual(authenticate(**self.user_credentials), self.user) def test_get_user(self): self.client.force_login(self.user) request = HttpRequest() request.session = self.client.session user = get_user(request) self.assertEqual(user, self.user)
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide a base class for all objects (called Bokeh Models) that can go in a Bokeh |Document|. ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import annotations import logging # isort:skip log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports from inspect import Parameter, Signature, isclass from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Set, Type, ) # Bokeh imports from ..core import properties as p from ..core.has_props import HasProps, abstract from ..core.property._sphinx import type_link from ..core.property.validation import without_property_validation from ..core.serialization import ObjectRefRep, Ref, Serializer from ..core.types import ID, Unknown from ..events import Event from ..themes import default as default_theme from ..util.callback_manager import EventCallbackManager, PropertyCallbackManager from ..util.serialization import make_id from .docs import html_repr, process_example from .util import HasDocumentRef, collect_models, visit_value_and_its_immediate_references if TYPE_CHECKING: from ..core.has_props import Setter from ..core.query import SelectorType from ..document import Document from ..document.events import DocumentPatchedEvent from ..models.callbacks import Callback as JSEventCallback from ..util.callback_manager import PropertyCallback #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'Model', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- @abstract class Model(HasProps, HasDocumentRef, PropertyCallbackManager, EventCallbackManager): ''' Base class for all objects stored in Bokeh |Document| instances. ''' # a canonical order for positional args that can be # used for any functions derived from this class _args = () _extra_kws = {} @classmethod def __init_subclass__(cls): super().__init_subclass__() if cls.__module__.startswith("bokeh.models"): assert "__init__" in cls.__dict__, str(cls) parameters = [x[0] for x in cls.parameters()] cls.__init__.__signature__ = Signature(parameters=parameters) process_example(cls) _id: ID def __new__(cls, *args, **kwargs) -> Model: obj = super().__new__(cls) obj._id = kwargs.pop("id", make_id()) return obj def __init__(self, *args, **kwargs: Any) -> None: # "id" is popped from **kw in __new__, so in an ideal world I don't # think it should be here too. 
But Python has subtle behavior here, so # it is necessary kwargs.pop("id", None) super().__init__(**kwargs) default_theme.apply_to_model(self) def __str__(self) -> str: name = self.__class__.__name__ return f"{name}(id={self.id!r}, ...)" __repr__ = __str__ def destroy(self) -> None: ''' Clean up references to the document and property ''' self._document = None self._temp_document = None self._property_values.clear() @property def id(self) -> ID: return self._id name: str | None = p.Nullable(p.String, help=""" An arbitrary, user-supplied name for this model. This name can be useful when querying the document to retrieve specific Bokeh models. .. code:: python >>> plot.circle([1,2,3], [4,5,6], name="temp") >>> plot.select(name="temp") [GlyphRenderer(id='399d53f5-73e9-44d9-9527-544b761c7705', ...)] .. note:: No uniqueness guarantees or other conditions are enforced on any names that are provided, nor is the name used directly by Bokeh for any reason. """) tags: List[Any] = p.List(p.AnyRef, help=""" An optional list of arbitrary, user-supplied values to attach to this model. This data can be useful when querying the document to retrieve specific Bokeh models: .. code:: python >>> r = plot.circle([1,2,3], [4,5,6]) >>> r.tags = ["foo", 10] >>> plot.select(tags=['foo', 10]) [GlyphRenderer(id='1de4c3df-a83d-480a-899b-fb263d3d5dd9', ...)] Or simply a convenient way to attach any necessary metadata to a model that can be accessed by ``CustomJS`` callbacks, etc. .. note:: No uniqueness guarantees or other conditions are enforced on any tags that are provided, nor are the tags used directly by Bokeh for any reason. """) js_event_callbacks = p.Dict(p.String, p.List(p.Instance("bokeh.models.callbacks.CustomJS")), help=""" A mapping of event names to lists of ``CustomJS`` callbacks. Typically, rather then modifying this property directly, callbacks should be added using the ``Model.js_on_event`` method: .. code:: python callback = CustomJS(code="console.log('tap event occurred')") plot.js_on_event('tap', callback) """) subscribed_events = p.List(p.String, help=""" List of events that are subscribed to by Python callbacks. This is the set of events that will be communicated from BokehJS back to Python for this model. """) js_property_callbacks = p.Dict(p.String, p.List(p.Instance("bokeh.models.callbacks.CustomJS")), help=""" A mapping of attribute names to lists of ``CustomJS`` callbacks, to be set up on BokehJS side when the document is created. Typically, rather then modifying this property directly, callbacks should be added using the ``Model.js_on_change`` method: .. code:: python callback = CustomJS(code="console.log('stuff')") plot.x_range.js_on_change('start', callback) """) syncable: bool = p.Bool(default=True, help=""" Indicates whether this model should be synchronized back to a Bokeh server when updated in a web browser. Setting to ``False`` may be useful to reduce network traffic when dealing with frequently updated objects whose updated values we don't need. .. note:: Setting this property to ``False`` will prevent any ``on_change()`` callbacks on this object from triggering. However, any JS-side callbacks will still work. """) # Properties -------------------------------------------------------------- @property def ref(self) -> Ref: return Ref(id=self._id) # Public methods ---------------------------------------------------------- @classmethod @without_property_validation def parameters(cls): ''' Generate Python ``Parameter`` values suitable for functions that are derived from the glyph. 
Returns: list(Parameter) ''' arg_params = [] no_more_defaults = False for arg in reversed(cls._args): descriptor = cls.lookup(arg) default = descriptor.class_default(cls, no_eval=True) if default is None: no_more_defaults = True # simplify field(x) defaults to just present the column name if isinstance(default, dict) and set(default) == {"field"}: default = default["field"] # make sure built-ins don't hold on to references to actual Models if cls.__module__.startswith("bokeh.models"): assert not isinstance(default, Model) param = Parameter( name=arg, kind=Parameter.POSITIONAL_OR_KEYWORD, # For positional arg properties, default=None means no default. default=Parameter.empty if no_more_defaults else default ) if default: del default typ = type_link(descriptor.property) arg_params.insert(0, (param, typ, descriptor.__doc__)) # these are not really useful, and should also really be private, just skip them omissions = {'js_event_callbacks', 'js_property_callbacks', 'subscribed_events'} kwarg_params = [] kws = set(cls.properties()) - set(cls._args) - omissions for kw in kws: descriptor = cls.lookup(kw) default = descriptor.class_default(cls, no_eval=True) # simplify field(x) defaults to just present the column name if isinstance(default, dict) and set(default) == {"field"}: default = default["field"] # make sure built-ins don't hold on to references to actual Models if cls.__module__.startswith("bokeh.models"): assert not isinstance(default, Model) param = Parameter( name=kw, kind=Parameter.KEYWORD_ONLY, default=default ) del default typ = type_link(descriptor.property) kwarg_params.append((param, typ, descriptor.__doc__)) for kw, (typ, doc) in cls._extra_kws.items(): param = Parameter( name=kw, kind=Parameter.KEYWORD_ONLY, ) kwarg_params.append((param, typ, doc)) kwarg_params.sort(key=lambda x: x[0].name) return arg_params + kwarg_params def js_on_event(self, event: str | Type[Event], *callbacks: JSEventCallback) -> None: if isinstance(event, str): pass elif isinstance(event, type) and issubclass(event, Event): event = event.event_name else: raise ValueError(f"expected string event name or event class, got {event}") all_callbacks = list(self.js_event_callbacks.get(event, [])) for callback in callbacks: if callback not in all_callbacks: all_callbacks.append(callback) self.js_event_callbacks[event] = all_callbacks def js_link(self, attr: str, other: Model, other_attr: str, attr_selector: int | str | None = None) -> None: ''' Link two Bokeh model properties using JavaScript. This is a convenience method that simplifies adding a CustomJS callback to update one Bokeh model property whenever another changes value. Args: attr (str) : The name of a Bokeh property on this model other (Model): A Bokeh model to link to self.attr other_attr (str) : The property on ``other`` to link together attr_selector (Union[int, str]) : The index to link an item in a subscriptable ``attr`` Added in version 1.1 Raises: ValueError Examples: This code with ``js_link``: .. code :: python select.js_link('value', plot, 'sizing_mode') is equivalent to the following: .. code:: python from bokeh.models import CustomJS select.js_on_change('value', CustomJS(args=dict(other=plot), code="other.sizing_mode = this.value" ) ) Additionally, to use attr_selector to attach the left side of a range slider to a plot's x_range: .. code :: python range_slider.js_link('value', plot.x_range, 'start', attr_selector=0) which is equivalent to: .. 
code :: python from bokeh.models import CustomJS range_slider.js_on_change('value', CustomJS(args=dict(other=plot.x_range), code="other.start = this.value[0]" ) ) ''' descriptor = self.lookup(attr, raises=False) if descriptor is None: raise ValueError("%r is not a property of self (%r)" % (attr, self)) if not isinstance(other, Model): raise ValueError("'other' is not a Bokeh model: %r" % other) other_descriptor = other.lookup(other_attr, raises=False) if other_descriptor is None: raise ValueError("%r is not a property of other (%r)" % (other_attr, other)) from bokeh.models import CustomJS selector = f"[{attr_selector!r}]" if attr_selector is not None else "" cb = CustomJS(args=dict(other=other), code=f"other.{other_descriptor.name} = this.{descriptor.name}{selector}") self.js_on_change(attr, cb) def js_on_change(self, event: str, *callbacks: JSEventCallback) -> None: ''' Attach a ``CustomJS`` callback to an arbitrary BokehJS model event. On the BokehJS side, change events for model properties have the form ``"change:property_name"``. As a convenience, if the event name passed to this method is also the name of a property on the model, then it will be prefixed with ``"change:"`` automatically: .. code:: python # these two are equivalent source.js_on_change('data', callback) source.js_on_change('change:data', callback) However, there are other kinds of events that can be useful to respond to, in addition to property change events. For example to run a callback whenever data is streamed to a ``ColumnDataSource``, use the ``"stream"`` event on the source: .. code:: python source.js_on_change('streaming', callback) ''' if len(callbacks) == 0: raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter") # handle any CustomJS callbacks here from bokeh.models import CustomJS if not all(isinstance(x, CustomJS) for x in callbacks): raise ValueError("not all callback values are CustomJS instances") descriptor = self.lookup(event, raises=False) if descriptor is not None: event = f"change:{descriptor.name}" old = {k: [cb for cb in cbs] for k, cbs in self.js_property_callbacks.items()} if event not in self.js_property_callbacks: self.js_property_callbacks[event] = [] for callback in callbacks: if callback in self.js_property_callbacks[event]: continue self.js_property_callbacks[event].append(callback) self.trigger('js_property_callbacks', old, self.js_property_callbacks) def on_change(self, attr: str, *callbacks: PropertyCallback) -> None: ''' Add a callback on this object to trigger when ``attr`` changes. Args: attr (str) : an attribute name on this object *callbacks (callable) : callback functions to register Returns: None Example: .. code-block:: python widget.on_change('value', callback1, callback2, ..., callback_n) ''' descriptor = self.lookup(attr) super().on_change(descriptor.name, *callbacks) def references(self) -> Set[Model]: ''' Returns all ``Models`` that this object has references to. ''' return set(collect_models(self)) def select(self, selector: SelectorType) -> Iterable[Model]: ''' Query this object and all of its references for objects that match the given selector. Args: selector (JSON-like) : Returns: seq[Model] ''' from ..core.query import find return find(self.references(), selector) def select_one(self, selector: SelectorType) -> Model | None: ''' Query this object and all of its references for objects that match the given selector. Raises an error if more than one object is found. 
Returns single matching object, or None if nothing is found Args: selector (JSON-like) : Returns: Model ''' result = list(self.select(selector)) if len(result) > 1: raise ValueError("Found more than one object matching %s: %r" % (selector, result)) if len(result) == 0: return None return result[0] def set_select(self, selector: Type[Model] | SelectorType, updates: Dict[str, Unknown]) -> None: ''' Update objects that match a given selector with the specified attribute/value updates. Args: selector (JSON-like) : updates (dict) : Returns: None ''' if isclass(selector) and issubclass(selector, Model): selector = dict(type=selector) for obj in self.select(selector): for key, val in updates.items(): setattr(obj, key, val) def to_serializable(self, serializer: Serializer) -> ObjectRefRep: serializer.add_ref(self, self.ref) super_rep = super().to_serializable(serializer) rep = ObjectRefRep( type="object", name=super_rep["name"], id=self.id, ) attributes = super_rep.get("attributes") if attributes is not None: rep["attributes"] = attributes return rep def trigger(self, attr: str, old: Unknown, new: Unknown, hint: DocumentPatchedEvent | None = None, setter: Setter | None = None) -> None: ''' ''' # The explicit assumption here is that hinted events do not need to # go through all the same invalidation steps. Currently this is the # case for ColumnsStreamedEvent and ColumnsPatchedEvent. However, # this may need to be further refined in the future, if the # assumption does not hold for future hinted events (e.g. the hint # could specify explicitly whether to do normal invalidation or not) if hint is None: dirty_count = 0 def mark_dirty(_: HasProps): nonlocal dirty_count dirty_count += 1 if self._document is not None: visit_value_and_its_immediate_references(new, mark_dirty) visit_value_and_its_immediate_references(old, mark_dirty) if dirty_count > 0: self.document.models.invalidate() # chain up to invoke callbacks descriptor = self.lookup(attr) super().trigger(descriptor.name, old, new, hint=hint, setter=setter) def _attach_document(self, doc: Document) -> None: ''' Attach a model to a Bokeh |Document|. This private interface should only ever called by the Document implementation to set the private ._document field properly ''' if self.document is doc: # nothing to do return if self.document is not None: raise RuntimeError(f"Models must be owned by only a single document, {self!r} is already in a doc") doc.theme.apply_to_model(self) self.document = doc self._update_event_callbacks() @classmethod def _clear_extensions(cls) -> None: cls.model_class_reverse_map = { k: v for k, v in cls.model_class_reverse_map.items() if getattr(v, "__implementation__", None) is None and getattr(v, "__javascript__", None) is None and getattr(v, "__css__", None) is None } def _detach_document(self) -> None: ''' Detach a model from a Bokeh |Document|. This private interface should only ever called by the Document implementation to unset the private ._document field properly ''' self.document = None default_theme.apply_to_model(self) def _repr_html_(self) -> str: return html_repr(self) def _sphinx_height_hint(self) -> int|None: return None #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
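# Usage sketch (illustration only): the linking and querying helpers defined
# above, using a hypothetical figure and slider that are not part of this
# module.
if __name__ == "__main__":
    from bokeh.models import CustomJS, Slider
    from bokeh.plotting import figure

    plot = figure(name="demo")
    slider = Slider(start=0, end=10, value=1, step=1)

    # js_link builds and registers the CustomJS callback for us ...
    slider.js_link("value", plot.x_range, "end")

    # ... which is shorthand for an explicit js_on_change registration:
    slider.js_on_change(
        "value",
        CustomJS(args=dict(other=plot.x_range), code="other.end = this.value"),
    )

    # Models can be queried through their reference graph via select():
    assert plot in list(plot.select({"name": "demo"}))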
# ./darwinpush/xb/raw/tor.py # -*- coding: utf-8 -*- # PyXB bindings for NM:454478a728396cf19a8fffa7ba6764ae205b33f8 # Generated 2015-04-23 16:42:14.516405 by PyXB version 1.2.4 using Python 3.4.1.final.0 # Namespace http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1 [xmlns:tor] from __future__ import unicode_literals import pyxb import pyxb.binding import pyxb.binding.saxer import io import pyxb.utils.utility import pyxb.utils.domutils import sys import pyxb.utils.six as _six # Unique identifier for bindings created at the same time _GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0') # Version of PyXB used to generate the bindings _PyXBVersion = '1.2.4' # Generated bindings are not compatible across PyXB versions if pyxb.__version__ != _PyXBVersion: raise pyxb.PyXBVersionError(_PyXBVersion) # Import bindings for namespaces imported into schema import pyxb.binding.datatypes import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct # NOTE: All namespace declarations are reserved within the binding Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1', create_if_missing=True) Namespace.configureCategories(['typeBinding', 'elementBinding']) def CreateFromDocument (xml_text, default_namespace=None, location_base=None): """Parse the given XML and use the document element to create a Python instance. @param xml_text An XML document. This should be data (Python 2 str or Python 3 bytes), or a text (Python 2 unicode or Python 3 str) in the L{pyxb._InputEncoding} encoding. @keyword default_namespace The L{pyxb.Namespace} instance to use as the default namespace where there is no default namespace in scope. If unspecified or C{None}, the namespace of the module containing this function will be used. @keyword location_base: An object to be recorded as the base of all L{pyxb.utils.utility.Location} instances associated with events and objects handled by the parser. You might pass the URI from which the document was obtained. """ if pyxb.XMLStyle_saxer != pyxb._XMLStyle: dom = pyxb.utils.domutils.StringToDOM(xml_text) return CreateFromDOM(dom.documentElement, default_namespace=default_namespace) if default_namespace is None: default_namespace = Namespace.fallbackNamespace() saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base) handler = saxer.getContentHandler() xmld = xml_text if isinstance(xmld, _six.text_type): xmld = xmld.encode(pyxb._InputEncoding) saxer.parse(io.BytesIO(xmld)) instance = handler.rootObject() return instance def CreateFromDOM (node, default_namespace=None): """Create a Python instance from the given DOM node. The node tag must correspond to an element declaration in this module. 
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}.""" if default_namespace is None: default_namespace = Namespace.fallbackNamespace() return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace) # Complex type {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}TrainOrderItem with content type ELEMENT_ONLY class TrainOrderItem (pyxb.binding.basis.complexTypeDefinition): """Describes the identifier of a train in the train order""" _TypeDefinition = None _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY _Abstract = False _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'TrainOrderItem') _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 16, 1) _ElementMap = {} _AttributeMap = {} # Base type is pyxb.binding.datatypes.anyType # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}rid uses Python identifier rid __rid = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'rid'), 'rid', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrderItem_httpwww_thalesgroup_comrttiPushPortTrainOrderv1rid', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 21, 3), ) rid = property(__rid.value, __rid.set, None, 'For trains in the train order where the train is the Darwin timetable, it will be identified by its RID') # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}trainID uses Python identifier trainID __trainID = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'trainID'), 'trainID', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrderItem_httpwww_thalesgroup_comrttiPushPortTrainOrderv1trainID', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 37, 3), ) trainID = property(__trainID.value, __trainID.set, None, 'Where a train in the train order is not in the Darwin timetable, a Train ID (headcode) will be supplied') _ElementMap.update({ __rid.name() : __rid, __trainID.name() : __trainID }) _AttributeMap.update({ }) Namespace.addCategoryObject('typeBinding', 'TrainOrderItem', TrainOrderItem) # Complex type {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}TrainOrderData with content type ELEMENT_ONLY class TrainOrderData (pyxb.binding.basis.complexTypeDefinition): """Defines the sequence of trains making up the train order""" _TypeDefinition = None _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY _Abstract = False _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'TrainOrderData') _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 44, 1) _ElementMap = {} _AttributeMap = {} # Base type is pyxb.binding.datatypes.anyType # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}first uses Python identifier first __first = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'first'), 'first', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrderData_httpwww_thalesgroup_comrttiPushPortTrainOrderv1first', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 49, 3), ) first = property(__first.value, __first.set, None, 'The first train in the train order.') # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}second uses Python identifier second __second = 
pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'second'), 'second', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrderData_httpwww_thalesgroup_comrttiPushPortTrainOrderv1second', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 55, 4), ) second = property(__second.value, __second.set, None, 'The second train in the train order.') # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}third uses Python identifier third __third = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'third'), 'third', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrderData_httpwww_thalesgroup_comrttiPushPortTrainOrderv1third', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 60, 4), ) third = property(__third.value, __third.set, None, 'The third train in the train order.') _ElementMap.update({ __first.name() : __first, __second.name() : __second, __third.name() : __third }) _AttributeMap.update({ }) Namespace.addCategoryObject('typeBinding', 'TrainOrderData', TrainOrderData) # Complex type [anonymous] with content type SIMPLE class CTD_ANON (pyxb.binding.basis.complexTypeDefinition): """For trains in the train order where the train is the Darwin timetable, it will be identified by its RID""" _TypeDefinition = _ImportedBinding_darwinpush_xb_ct.RIDType _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE _Abstract = False _ExpandedName = None _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 25, 4) _ElementMap = {} _AttributeMap = {} # Base type is _ImportedBinding_darwinpush_xb_ct.RIDType # Attribute wta uses Python identifier wta __wta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wta'), 'wta', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_CTD_ANON_wta', _ImportedBinding_darwinpush_xb_ct.WTimeType) __wta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 243, 2) __wta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 243, 2) wta = property(__wta.value, __wta.set, None, 'Working time of arrival.') # Attribute wtd uses Python identifier wtd __wtd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtd'), 'wtd', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_CTD_ANON_wtd', _ImportedBinding_darwinpush_xb_ct.WTimeType) __wtd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 248, 2) __wtd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 248, 2) wtd = property(__wtd.value, __wtd.set, None, 'Working time of departure.') # Attribute wtp uses Python identifier wtp __wtp = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'wtp'), 'wtp', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_CTD_ANON_wtp', _ImportedBinding_darwinpush_xb_ct.WTimeType) __wtp._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 253, 2) __wtp._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 253, 2) wtp = property(__wtp.value, __wtp.set, None, 'Working time of pass.') # Attribute pta uses Python identifier pta 
__pta = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'pta'), 'pta', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_CTD_ANON_pta', _ImportedBinding_darwinpush_xb_ct.RTTITimeType) __pta._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 258, 2) __pta._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 258, 2) pta = property(__pta.value, __pta.set, None, 'Public time of arrival.') # Attribute ptd uses Python identifier ptd __ptd = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ptd'), 'ptd', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_CTD_ANON_ptd', _ImportedBinding_darwinpush_xb_ct.RTTITimeType) __ptd._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 263, 2) __ptd._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTCommonTypes_v1.xsd', 263, 2) ptd = property(__ptd.value, __ptd.set, None, 'Public time of departure.') _ElementMap.update({ }) _AttributeMap.update({ __wta.name() : __wta, __wtd.name() : __wtd, __wtp.name() : __wtp, __pta.name() : __pta, __ptd.name() : __ptd }) # Complex type {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}TrainOrder with content type ELEMENT_ONLY class TrainOrder (pyxb.binding.basis.complexTypeDefinition): """Defines the expected Train order at a platform""" _TypeDefinition = None _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY _Abstract = False _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'TrainOrder') _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 68, 1) _ElementMap = {} _AttributeMap = {} # Base type is pyxb.binding.datatypes.anyType # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}set uses Python identifier set_ __set = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'set'), 'set_', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrder_httpwww_thalesgroup_comrttiPushPortTrainOrderv1set', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 73, 3), ) set_ = property(__set.value, __set.set, None, None) # Element {http://www.thalesgroup.com/rtti/PushPort/TrainOrder/v1}clear uses Python identifier clear __clear = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'clear'), 'clear', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrder_httpwww_thalesgroup_comrttiPushPortTrainOrderv1clear', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 74, 3), ) clear = property(__clear.value, __clear.set, None, 'Clear the current train order') # Attribute tiploc uses Python identifier tiploc __tiploc = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tiploc'), 'tiploc', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrder_tiploc', _ImportedBinding_darwinpush_xb_ct.TiplocType, required=True) __tiploc._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 80, 2) __tiploc._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 80, 2) tiploc = property(__tiploc.value, __tiploc.set, None, 'The tiploc where the train order applies') # 
Attribute crs uses Python identifier crs __crs = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'crs'), 'crs', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrder_crs', _ImportedBinding_darwinpush_xb_ct.CrsType, required=True) __crs._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 85, 2) __crs._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 85, 2) crs = property(__crs.value, __crs.set, None, 'The CRS code of the station where the train order applies') # Attribute platform uses Python identifier platform __platform = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'platform'), 'platform', '__httpwww_thalesgroup_comrttiPushPortTrainOrderv1_TrainOrder_platform', _ImportedBinding_darwinpush_xb_ct.PlatformType, required=True) __platform._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 90, 2) __platform._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 90, 2) platform = property(__platform.value, __platform.set, None, 'The platform number where the train order applies') _ElementMap.update({ __set.name() : __set, __clear.name() : __clear }) _AttributeMap.update({ __tiploc.name() : __tiploc, __crs.name() : __crs, __platform.name() : __platform }) Namespace.addCategoryObject('typeBinding', 'TrainOrder', TrainOrder) TrainOrderItem._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'rid'), CTD_ANON, scope=TrainOrderItem, documentation='For trains in the train order where the train is the Darwin timetable, it will be identified by its RID', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 21, 3))) TrainOrderItem._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'trainID'), _ImportedBinding_darwinpush_xb_ct.TrainIdType, scope=TrainOrderItem, documentation='Where a train in the train order is not in the Darwin timetable, a Train ID (headcode) will be supplied', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 37, 3))) def _BuildAutomaton (): # Remove this helper function from the namespace after it is invoked global _BuildAutomaton del _BuildAutomaton import pyxb.utils.fac as fac counters = set() states = [] final_update = set() symbol = pyxb.binding.content.ElementUse(TrainOrderItem._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'rid')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 21, 3)) st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False) states.append(st_0) final_update = set() symbol = pyxb.binding.content.ElementUse(TrainOrderItem._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'trainID')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 37, 3)) st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False) states.append(st_1) transitions = [] st_0._set_transitionSet(transitions) transitions = [] st_1._set_transitionSet(transitions) return fac.Automaton(states, counters, False, containing_state=None) TrainOrderItem._Automaton = _BuildAutomaton() 
TrainOrderData._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'first'), TrainOrderItem, scope=TrainOrderData, documentation='The first train in the train order.', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 49, 3))) TrainOrderData._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'second'), TrainOrderItem, scope=TrainOrderData, documentation='The second train in the train order.', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 55, 4))) TrainOrderData._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'third'), TrainOrderItem, scope=TrainOrderData, documentation='The third train in the train order.', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 60, 4))) def _BuildAutomaton_ (): # Remove this helper function from the namespace after it is invoked global _BuildAutomaton_ del _BuildAutomaton_ import pyxb.utils.fac as fac counters = set() cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 54, 3)) counters.add(cc_0) cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 60, 4)) counters.add(cc_1) states = [] final_update = set() symbol = pyxb.binding.content.ElementUse(TrainOrderData._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'first')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 49, 3)) st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False) states.append(st_0) final_update = set() final_update.add(fac.UpdateInstruction(cc_0, False)) symbol = pyxb.binding.content.ElementUse(TrainOrderData._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'second')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 55, 4)) st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False) states.append(st_1) final_update = set() final_update.add(fac.UpdateInstruction(cc_0, False)) final_update.add(fac.UpdateInstruction(cc_1, False)) symbol = pyxb.binding.content.ElementUse(TrainOrderData._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'third')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 60, 4)) st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False) states.append(st_2) transitions = [] transitions.append(fac.Transition(st_1, [ ])) st_0._set_transitionSet(transitions) transitions = [] transitions.append(fac.Transition(st_1, [ fac.UpdateInstruction(cc_0, True) ])) transitions.append(fac.Transition(st_2, [ ])) st_1._set_transitionSet(transitions) transitions = [] transitions.append(fac.Transition(st_1, [ fac.UpdateInstruction(cc_0, True), fac.UpdateInstruction(cc_1, False) ])) transitions.append(fac.Transition(st_2, [ fac.UpdateInstruction(cc_1, True) ])) st_2._set_transitionSet(transitions) return fac.Automaton(states, counters, False, containing_state=None) TrainOrderData._Automaton = _BuildAutomaton_() TrainOrder._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'set'), TrainOrderData, scope=TrainOrder, 
location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 73, 3))) TrainOrder._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'clear'), pyxb.binding.datatypes.anyType, scope=TrainOrder, documentation='Clear the current train order', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 74, 3))) def _BuildAutomaton_2 (): # Remove this helper function from the namespace after it is invoked global _BuildAutomaton_2 del _BuildAutomaton_2 import pyxb.utils.fac as fac counters = set() states = [] final_update = set() symbol = pyxb.binding.content.ElementUse(TrainOrder._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'set')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 73, 3)) st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False) states.append(st_0) final_update = set() symbol = pyxb.binding.content.ElementUse(TrainOrder._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'clear')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTTrainOrder_v1.xsd', 74, 3)) st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False) states.append(st_1) transitions = [] st_0._set_transitionSet(transitions) transitions = [] st_1._set_transitionSet(transitions) return fac.Automaton(states, counters, False, containing_state=None) TrainOrder._Automaton = _BuildAutomaton_2()
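# --- Hedged usage sketch (illustrative only; not generated by PyXB) ----------
# A TrainOrderItem is a choice of <rid> or <trainID>, so constructing it with a
# single keyword argument should satisfy its content model. The headcode
# '2K04' is a hypothetical value assumed to be valid for the imported
# ct.TrainIdType restriction.
def _example_train_order_item():
    item = TrainOrderItem(trainID='2K04')
    return item.trainID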
"""A collection of integration tests for the ephemeral container service. """ import base64 from ConfigParser import ConfigParser import httplib import os import signal import subprocess import tempfile import time import unittest import uuid from nose.plugins.attrib import attr from tor_async_util.nose_plugins import FileCapture import requests import ecs # # defines the api version that this set of tests validates # _api_version = r'v1.1' class ServiceConfig(object): """This context manager encapsulates the details of creating a configuration file suitable for determining how an instance of the ephemeral container service should operate. This class is intended make writing integration tests a little easlier/cleaner. """ def __init__(self): object.__init__(self) self._ip = '127.0.0.1' self._port = 8448 self.endpoint = 'http://%s:%d' % (self._ip, self._port) self.filename = None def __enter__(self): section = 'ecs' cp = ConfigParser() cp.add_section(section) cp.set(section, 'address', self._ip) cp.set(section, 'port', self._port) cp.set(section, 'log_level', 'info') cp.set(section, 'max_concurrent_executing_http_requests', '250') cp.set(section, 'docker_remote_api', 'http://172.17.0.1:2375') cp.set(section, 'docker_remote_api_connect_timeout', 3 * 1000) cp.set(section, 'docker_remote_api_request_timeout', 5 * 60 * 1000) self.filename = tempfile.mktemp() with open(self.filename, 'w+') as fp: cp.write(fp) FileCapture.watch(self.filename, type(self).__name__) return self def __exit__(self, exc_type, exc_value, traceback): pass class Service(object): """This context manager encapsulates the details behind spinning up the ephemeral container service and is intended make writing integration tests a little easlier/cleaner. """ def __init__(self, service_config): object.__init__(self) self.service_config = service_config self._stdout_file = None self._process = None def __enter__(self): self._stdout_file = tempfile.mktemp() FileCapture.watch(self._stdout_file, 'ecservice.py stdout') cmd = [ 'ecservice.py', '--config=%s' % self.service_config.filename, ] self._process = subprocess.Popen( cmd, stdout=open(self._stdout_file, 'w+'), stderr=subprocess.STDOUT, preexec_fn=os.setsid, ) url = '%s/%s/_noop' % (self.service_config.endpoint, _api_version) for i in range(0, 10): try: response = requests.get(url) if response.status_code == httplib.OK: return self except Exception: pass time.sleep(0.5) ex_msg = 'Could not confirm service started @ %s' % url ex = Exception(ex_msg) raise ex def __exit__(self, exc_type, exc_value, traceback): if self._process: os.killpg(self._process.pid, signal.SIGKILL) self._process = None class IntegrationTestCase(unittest.TestCase): """An abstract base class for all integration tests.""" def setup_env_and_run_func(self, the_test_func): endpoint = os.environ.get('ECS_ENDPOINT', None) if endpoint: key = os.environ.get('ECS_KEY', None) secret = os.environ.get('ECS_SECRET', None) auth = requests.auth.HTTPBasicAuth(key, secret) if key and secret else None the_test_func(endpoint, auth) else: with ServiceConfig() as service_config: with Service(service_config): the_test_func(service_config.endpoint, None) def avoid_rate_limiting_and_sleep_one_second(self): if os.environ.get('ECS_ENDPOINT', None): one_second = 1.0 time.sleep(one_second) def setUp(self): self.avoid_rate_limiting_and_sleep_one_second() def tearDown(self): pass @attr('integration') class NoOpTestCase(IntegrationTestCase): """A collection of integration tests for the /_noop endpoint.""" def test_happy_path(self): def 
the_test(endpoint, auth): url = '%s/%s/_noop' % (endpoint, _api_version) response = requests.get(url, auth=auth) self.assertEqual(response.status_code, httplib.OK) self.setup_env_and_run_func(the_test) @attr('integration') class VersionTestCase(IntegrationTestCase): """A collection of integration tests for the /_version endpoint.""" def test_happy_path_no_quick_arg(self): def the_test(endpoint, auth): url = '%s/%s/_version' % (endpoint, _api_version) response = requests.get(url, auth=auth) self.assertEqual(response.status_code, httplib.OK) expected_response = { 'version': ecs.__version__, 'links': { 'self': { 'href': url, } } } self.assertEqual(expected_response, response.json()) self.setup_env_and_run_func(the_test) @attr('integration') class HealthTestCase(IntegrationTestCase): """A collection of integration tests for the /_health endpoint.""" def test_happy_path_no_quick_arg(self): def the_test(endpoint, auth): url = '%s/%s/_health' % (endpoint, _api_version) response = requests.get(url, auth=auth) self.assertEqual(response.status_code, httplib.OK) self.setup_env_and_run_func(the_test) def test_happy_path_quick_arg_is_true(self): def the_test(endpoint, auth): url = '%s/%s/_health?quick=true' % (endpoint, _api_version) response = requests.get(url, auth=auth) self.assertEqual(response.status_code, httplib.OK) self.setup_env_and_run_func(the_test) def test_happy_path_quick_arg_is_false(self): def the_test(endpoint, auth): url = '%s/%s/_health?quick=false' % (endpoint, _api_version) response = requests.get(url, auth=auth) self.assertEqual(response.status_code, httplib.OK) self.setup_env_and_run_func(the_test) def test_happy_path_quick_arg_is_not_boolean(self): def the_test(endpoint, auth): url = '%s/%s/_health?quick=dave' % (endpoint, _api_version) response = requests.get(url, auth=auth) self.assertEqual(response.status_code, httplib.BAD_REQUEST) self.setup_env_and_run_func(the_test) @attr('integration') class TasksTestCase(IntegrationTestCase): """A collection of integration tests for the /_tasks endpoint.""" def _test_happy_path_with_simple_echo(self, trailing_slash): def the_test(endpoint, auth): url = '%s/%s/tasks%s' % ( endpoint, _api_version, '/' if trailing_slash else '', ) body = { 'docker_image': 'ubuntu:14.04', 'cmd': [ 'echo', 'hello world', ], } response = requests.post(url, auth=auth, json=body) self.assertEqual(response.status_code, httplib.CREATED) json_response_body = response.json() self.assertEqual(json_response_body['exitCode'], 0) self.assertEqual( base64.b64decode(json_response_body['stdout']).strip(), body['cmd'][1]) self.assertEqual( json_response_body['stderr'].strip(), '') self.setup_env_and_run_func(the_test) def test_happy_path_with_simple_echo_with_training_slash(self): self._test_happy_path_with_simple_echo(trailing_slash=True) def test_happy_path_with_simple_echo_without_training_slash(self): self._test_happy_path_with_simple_echo(trailing_slash=False) def test_non_zero_exit_code(self): def the_test(endpoint, auth): url = '%s/%s/tasks' % (endpoint, _api_version) exit_code = 1 body = { 'docker_image': 'ubuntu:14.04', 'cmd': [ 'bash', '-c', 'exit %d' % exit_code, ] } response = requests.post(url, auth=auth, json=body) self.assertEqual(response.status_code, httplib.CREATED) json_response_body = response.json() self.assertEqual(json_response_body['exitCode'], exit_code) self.assertEqual(json_response_body['stdout'].strip(), '') self.assertEqual(json_response_body['stderr'].strip(), '') self.setup_env_and_run_func(the_test) def test_stdout_and_stderr_output(self): 
def the_test(endpoint, auth): url = '%s/%s/tasks' % (endpoint, _api_version) stdout = uuid.uuid4().hex stderr = uuid.uuid4().hex body = { 'docker_image': 'ubuntu:14.04', 'cmd': [ 'bash', '-c', 'echo %s > /dev/stdout && echo %s > /dev/stderr' % (stdout, stderr) ] } response = requests.post(url, auth=auth, json=body) self.assertEqual(response.status_code, httplib.CREATED) json_response_body = response.json() self.assertEqual( base64.b64decode(json_response_body['stdout']).strip(), stdout) self.assertEqual( base64.b64decode(json_response_body['stderr']).strip(), stderr) self.setup_env_and_run_func(the_test) def test_unknown_docker_image(self): def the_test(endpoint, auth): url = '%s/%s/tasks' % (endpoint, _api_version) body = { 'docker_image': 'ubuntu:dave_was_here', 'cmd': [ 'echo', 'dave was here', ] } response = requests.post(url, auth=auth, json=body) self.assertEqual(response.status_code, httplib.NOT_FOUND) self.setup_env_and_run_func(the_test) def test_invalid_docker_image_name(self): def the_test(endpoint, auth): url = '%s/%s/tasks' % (endpoint, _api_version) body = { 'docker_image': 'IMAGE_NAME_IS_INVALID', 'cmd': [ 'echo', 'dave was here', ] } response = requests.post(url, auth=auth, json=body) self.assertEqual(response.status_code, httplib.BAD_REQUEST) self.setup_env_and_run_func(the_test) def test_bad_request_body(self): def the_test(endpoint, auth): bodies = [ { 'cmd': [ 'echo', 'dave was here', ] }, { 'docker_image': 'ubuntu:14.04', }, { 'docker_image': 'ubuntu:14.04', 'cmd': [ ] }, { }, ] for body in bodies: self.avoid_rate_limiting_and_sleep_one_second() url = '%s/%s/tasks' % (endpoint, _api_version) response = requests.post(url, auth=auth, json=body) self.assertEqual(response.status_code, httplib.BAD_REQUEST) self.setup_env_and_run_func(the_test)
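# --- Hedged usage sketch -----------------------------------------------------
# The ServiceConfig and Service context managers above can also be composed
# outside of a TestCase, e.g. to issue a one-off request against a locally
# spun-up instance of the ephemeral container service.
if __name__ == '__main__':
    with ServiceConfig() as service_config:
        with Service(service_config):
            url = '%s/%s/_version' % (service_config.endpoint, _api_version)
            print(requests.get(url).json())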
import gevent import sys from gevent.server import StreamServer from JumpScale import j import inspect import time import os from PortalTCPChannels import ManholeSession, WorkerSession, TCPSessionLog try: import fcntl except: pass raise RuntimeError("is not working now") #THERE ARE SOME GOOD IDEAS IN HERE IN HOW TO BUILD A SOCKET SERVER WITH MANOLE, ... class PortalProcess(): """ """ def __init__(self, mainLoop=None, inprocess=False, cfgdir="", startdir=""): self.started = False # self.logs=[] # self.errors=[] self.epoch = time.time() self.lock = {} # j.errorconditionhandler.setExceptHook() #@does not do much? # Trigger the key value store extension so the enum is loaded self.cfgdir = cfgdir if self.cfgdir == "": self.cfgdir = "cfg" # check if the dir we got started from is a link, if so will create a new dir and copy the config files to there if j.system.fs.isLink(startdir, True): # we are link do not use this config info name = j.system.fs.getDirName(startdir + "/", True) + "_localconfig" newpath = j.system.fs.joinPaths(j.system.fs.getParent(startdir + "/"), name) if not j.system.fs.exists(newpath): j.system.fs.createDir(newpath) pathcfgold = j.system.fs.joinPaths(startdir, "cfg") j.system.fs.copyDirTree(pathcfgold, newpath) self.cfgdir = newpath ini = j.tools.inifile.open(self.cfgdir + "/portal.cfg") if ini.checkParam("main", "appdir"): self.appdir = self._replaceVar(ini.getValue("main", "appdir")) self.appdir=self.appdir.replace("$base",j.dirs.baseDir) else: self.appdir = j.system.fs.getcwd() # self.codepath=ini.getValue("main","codepath") # if self.codepath.strip()=="": #self.codepath=j.system.fs.joinPaths( j.dirs.varDir,"actorscode") # j.system.fs.createDir(self.codepath) # self.specpath=ini.getValue("main","specpath") # if self.specpath.strip()=="": # self.specpath="specs" # if not j.system.fs.exists(self.specpath): #raise RuntimeError("spec path does have to exist: %s" % self.specpath) dbtype = ini.getValue("main", "dbtype").lower().strip() if dbtype == "fs": self.dbtype = "FILE_SYSTEM" elif dbtype == "mem": self.dbtype = "MEMORY" elif dbtype == "redis": self.dbtype = "REDIS" elif dbtype == "arakoon": self.dbtype = "ARAKOON" else: raise RuntimeError("could not find appropriate core db, supported are: fs,mem,redis,arakoon, used here'%s'"%dbtype) # self.systemdb=j.db.keyvaluestore.getFileSystemStore("appserversystem",baseDir=self._replaceVar(ini.getValue("systemdb","dbdpath"))) self.wsport = int(ini.getValue("main", "webserverport")) secret = ini.getValue("main", "secret") admingroups = ini.getValue("main", "admingroups").split(",") # self.filesroot = self._replaceVar(ini.getValue("main", "filesroot")) if self.wsport > 0 and inprocess == False: self.webserver = j.core.portal.get(self.wsport, cfgdir=cfgdir,secret=secret,admingroups=admingroups) else: self.webserver = None self._greenLetsPath = j.system.fs.joinPaths(j.dirs.varDir, "portal_greenlets", self.wsport) j.system.fs.createDir(self._greenLetsPath) sys.path.append(self._greenLetsPath) self.tcpserver = None self.tcpservercmds = {} tcpserverport = int(ini.getValue("main", "tcpserverport", default=0)) if tcpserverport > 0 and inprocess == False: self.tcpserver = StreamServer(('0.0.0.0', tcpserverport), self.socketaccept) manholeport = int(ini.getValue("main", "manholeport", default=0)) self.manholeserver = None if manholeport > 0 and inprocess == False: self.manholeserver = StreamServer(('0.0.0.0', manholeport), self.socketaccept_manhole) if inprocess == False and (manholeport > 0 or tcpserverport > 0): self.sessions = {} 
self.nrsessions = 0 # self.messagerouter=MessageRouter() # self.logserver=None self.logserver_enable = False # if logserver==True: #self.logserver=StreamServer(('0.0.0.0', 6002), self.socketaccept_log) # self.logserver_enable=True # elif logserver<>None: # @todo configure the logging framework # pass self.ecserver_enable = False # self.ecserver=None #errorconditionserver # if ecserver==True: #self.ecserver=StreamServer(('0.0.0.0', 6003), self.socketaccept_ec) # self.ecserver_enable=True # elif ecserver<>None: # @todo configure the errorcondition framework # pass self.signalserver_enable = False # self.signalserver=None #signal handling # if signalserver==True: #self.signalserver=StreamServer(('0.0.0.0', 6004), self.socketaccept_signal) # self.signalserver_enable=True # elif signalserver<>None: # @todo configure the signal framework # pass self.mainLoop = mainLoop j.core.portal.active = self self.cfg = ini # toload=[] self.bootstrap() # if self.ismaster: # self.actorsloader.getActor("system", "master") # self.master = j.apps.system.master.extensions.master # # self.master._init() # # self.master.gridmapPrevious=None # # self.master.gridMapSave() # # self.master.gridMapRegisterPortal(self.ismaster,self.ipaddr,self.wsport,self.secret) # # look for nginx & start # #self.startNginxServer() # # self.scheduler = Scheduler() # else: # self.master = None # #from JumpScale.core.Shell import ipshellDebug,ipshell # # print "DEBUG NOW not implemented yet in appserver6process, need to connect to other master & master client" # # ipshell() self.loadFromConfig() def reset(self): self.bootstrap() self.loadFromConfig() def bootstrap(self): self.actorsloader.reset() self.actorsloader._generateLoadActor("system", "contentmanager", actorpath="system/system__contentmanager/") # self.actorsloader._generateLoadActor("system", "master", actorpath="system/system__master/") self.actorsloader._generateLoadActor("system", "usermanager", actorpath="system/system__usermanager/") self.actorsloader.scan("system") self.actorsloader.getActor("system", "usermanager") # self.actorsloader.getActor("system", "errorconditionhandler") # self.actorsloader._getSystemLoaderForUsersGroups() def loadFromConfig(self, reset=False): if reset: j.core.codegenerator.resetMemNonSystem() j.core.specparser.resetMemNonSystem() self.webserver.contentdirs = {} loader = self.actorsloader self.webserver.loadFromConfig4loader(loader, reset) def _replaceVar(self, txt): # txt = txt.replace("$base", j.dirs.baseDir).replace("\\", "/") txt = txt.replace("$appdir", j.system.fs.getcwd()).replace("\\", "/") txt = txt.replace("$vardir", j.dirs.varDir).replace("\\", "/") txt = txt.replace("$htmllibdir", j.html.getHtmllibDir()).replace("\\", "/") txt = txt.replace("\\", "/") return txt # def startNginxServer(self): # ini = j.tools.inifile.open("cfg/appserver.cfg") # local = int(ini.getValue("nginx", "local")) == 1 # configtemplate = j.system.fs.fileGetContents(j.system.fs.joinPaths(j.core.portal.getConfigTemplatesPath(), "nginx", "appserver_template.conf")) # configtemplate = self._replaceVar(configtemplate) # if local: # varnginx = j.system.fs.joinPaths(j.dirs.varDir, 'nginx') # j.system.fs.createDir(varnginx) # if j.system.platformtype.isWindows(): # apppath = self._replaceVar(ini.getValue("nginx", "apppath")).replace("\\", "/") # cfgpath = j.system.fs.joinPaths(apppath, "conf", "sites-enabled", "appserver.conf") # j.system.fs.writeFile(cfgpath, configtemplate) # apppath2 = j.system.fs.joinPaths(apppath, "start.bat") # cmd = "%s %s" % (apppath2, apppath) # 
cmd = cmd.replace("\\", "/").replace("//", "/") # extpath = inspect.getfile(self.__init__) # extpath = j.system.fs.getDirName(extpath) # maincfg = j.system.fs.joinPaths(extpath, "configtemplates", "nginx", "nginx.conf") # configtemplate2 = j.system.fs.fileGetContents(maincfg) # configtemplate2 = self._replaceVar(configtemplate2) # j.system.fs.writeFile("%s/conf/nginx.conf" % apppath, configtemplate2) # pid = j.system.windows.getPidOfProcess("nginx.exe") # if pid != None: # j.system.process.kill(pid) # pid = j.system.windows.getPidOfProcess("php-cgi.exe") # if pid != None: # j.system.process.kill(pid) # j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.varDir, "nginx")) # print "start nginx, cmd was %s" % (cmd) # j.system.process.executeAsync(cmd, outputToStdout=False) # else: # j.system.platform.ubuntu.check() # j.system.fs.remove("/etc/nginx/sites-enabled/default") # cfgpath = j.system.fs.joinPaths("/etc/nginx/sites-enabled", "appserver.conf") # j.system.fs.writeFile(cfgpath, configtemplate) # if not j.system.fs.exists("/etc/nginx/nginx.conf.backup"): # j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.varDir, "nginx")) # maincfg = j.system.fs.joinPaths(j.core.portal.getConfigTemplatesPath(), "nginx", "nginx.conf") # configtemplate2 = j.system.fs.fileGetContents(maincfg) # configtemplate2 = self._replaceVar(configtemplate2) # j.system.fs.copyFile("/etc/nginx/nginx.conf", "/etc/nginx/nginx.conf.backup") # j.system.fs.writeFile("/etc/nginx/nginx.conf", configtemplate2) # j.system.process.execute("/etc/init.d/nginx restart") # j.system.process.execute("/etc/init.d/nginx reload") # else: # pass # #raise RuntimeError("only supported in nginx mode") def activateActor(self, appname, actor): if not "%s_%s" % (appname, actor) in self.actors.keys(): # need to activate result = self.actorsloader.getActor(appname, actor) if result == None: # there was no actor return False def addTCPServerCmd(self, cmdName, function): self.tcpservercmds[cmdName] = function def setTcpServer(self, socketAcceptFunction): self.tcpserver = StreamServer(('0.0.0.0', 6000), socketAcceptFunction) def _addsession(self, session): self.sessions[self.nrsessions] = session session.sessionnr = self.nrsessions self.nrsessions += 1 session.ready() return self.nrsessions - 1 # this handler will be run for each incoming connection in a dedicated greenlet def socketaccept_manhole(self, socket, address): ip, port = address socket.sendall('Manhole For Portal Server \n\n') session = ManholeSession(ip, port, socket) self._addsession(session) session.run() def socketaccept(self, socket, address): ip, port = address session = WorkerSession(ip, port, socket) self._addsession(session) def socketaccept_log(self, socket, address): ip, port = address session = TCPSessionLog(ip, port, socket) self._addsession(session) # def socketaccept_ec(self,socket, address): # ip,port=address # session=TCPSessionEC(ip,port,socket) # self._addsession(session) # def socketaccept_signal(self,socket, address): # ip,port=address # session=TCPSessionSignal(ip,port,socket) # self._addsession(session) def _timer(self): """ will remember time every 1/10 sec """ while True: # self.epochbin=struct.pack("I",time.time()) self.epoch = time.time() gevent.sleep(0.1) # def _taskSchedulerTimer(self): # """ # every 4 seconds check maintenance queue # """ # while True: # gevent.sleep(5) # self.scheduler.check(self.epoch) def addQGreenlet(self, appName, greenlet): """ """ if self.webserver == None: return qGreenletObject = greenlet() if qGreenletObject.method == "": raise 
RuntimeError("greenlet class needs to have a method") if qGreenletObject.actor == "": raise RuntimeError("greenlet class needs to have a actor") qGreenletObject.server = self self.webserver.addRoute(function=qGreenletObject.wscall, appname=appName, actor=qGreenletObject.actor, method=qGreenletObject.method, paramvalidation=qGreenletObject.paramvalidation, paramdescription=qGreenletObject.paramdescription, paramoptional=qGreenletObject.paramoptional, description=qGreenletObject.description, auth=qGreenletObject.auth) def start(self, key=None, reset=False): # this is the trigger to start print "STARTING applicationserver on port %s" % self.wsport TIMER = gevent.greenlet.Greenlet(self._timer) TIMER.start() if self.mainLoop != None: MAINLOOP = gevent.greenlet.Greenlet(self.mainLoop) MAINLOOP.start() self.started = True if self.tcpserver != None: self.tcpserver.start() if self.manholeserver != None: self.manholeserver.start() if self.logserver_enable == True: self.logserver.start() if self.ecserver_enable == True: self.ecserver.start() if self.signalserver_enable == True: self.signalserver.start() # self.redirectErrors() if self.webserver != None: self.webserver.start(reset=reset) def processErrorConditionObject(self, eco): eco.process() def restartInProcess(self, app): args = sys.argv[:] args.insert(0, sys.executable) apppath = j.system.fs.joinPaths(j.dirs.appDir, app) max_fd = 1024 for fd in range(3, max_fd): try: flags = fcntl.fcntl(fd, fcntl.F_GETFD) except IOError: continue fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) os.chdir(apppath) os.execv(sys.executable, args) # def getRedisClient(self,appname,actorname): # if ini.checkSection("redis"): # redisip=ini.getValue("redis","ipaddr") # redisport=ini.getValue("redis","port") #redisclient=redis.StrictRedis(host=redisip, port=int(redisport), db=0) # else: # redisclient=None # return redisclient
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_firewall_internet_service short_description: Show Internet Service application in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify firewall feature and internet_service category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 state: description: - Indicates whether to create or remove the object. This attribute was present already in previous version in a deeper level. It has been moved out to this outer level. type: str required: false choices: - present - absent version_added: 2.9 firewall_internet_service: description: - Show Internet Service application. default: null type: dict suboptions: state: description: - B(Deprecated) - Starting with Ansible 2.9 we recommend using the top-level 'state' parameter. - HORIZONTALLINE - Indicates whether to create or remove the object. type: str required: false choices: - present - absent database: description: - Database name this Internet Service belongs to. type: str choices: - isdb - irdb direction: description: - How this service may be used in a firewall policy (source, destination or both). type: str choices: - src - dst - both entry: description: - Entries in the Internet Service database. type: list suboptions: id: description: - Entry ID. required: true type: int ip_number: description: - Total number of IP addresses. type: int ip_range_number: description: - Total number of IP ranges. type: int port: description: - Integer value for the TCP/IP port (0 - 65535). type: int protocol: description: - Integer value for the protocol type as defined by IANA (0 - 255). 
type: int icon_id: description: - Icon ID of Internet Service. type: int id: description: - Internet Service ID. required: true type: int name: description: - Internet Service name. type: str offset: description: - Offset of Internet Service ID. type: int reputation: description: - Reputation level of the Internet Service. type: int sld_id: description: - Second Level Domain. type: int ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Show Internet Service application. fortios_firewall_internet_service: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" firewall_internet_service: database: "isdb" direction: "src" entry: - id: "6" ip_number: "7" ip_range_number: "8" port: "9" protocol: "10" icon_id: "11" id: "12" name: "default_name_13" offset: "14" reputation: "15" sld_id: "16" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_firewall_internet_service_data(json): option_list = ['database', 'direction', 'entry', 'icon_id', 'id', 'name', 'offset', 'reputation', 'sld_id'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def firewall_internet_service(data, fos): vdom = data['vdom'] if 'state' in data and data['state']: state = data['state'] elif 'state' in data['firewall_internet_service'] and data['firewall_internet_service']: state = data['firewall_internet_service']['state'] else: state = True 
firewall_internet_service_data = data['firewall_internet_service'] filtered_data = underscore_to_hyphen(filter_firewall_internet_service_data(firewall_internet_service_data)) if state == "present": return fos.set('firewall', 'internet-service', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('firewall', 'internet-service', mkey=filtered_data['id'], vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_firewall(data, fos): if data['firewall_internet_service']: resp = firewall_internet_service(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "firewall_internet_service": { "required": False, "type": "dict", "default": None, "options": { "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "database": {"required": False, "type": "str", "choices": ["isdb", "irdb"]}, "direction": {"required": False, "type": "str", "choices": ["src", "dst", "both"]}, "entry": {"required": False, "type": "list", "options": { "id": {"required": True, "type": "int"}, "ip_number": {"required": False, "type": "int"}, "ip_range_number": {"required": False, "type": "int"}, "port": {"required": False, "type": "int"}, "protocol": {"required": False, "type": "int"} }}, "icon_id": {"required": False, "type": "int"}, "id": {"required": True, "type": "int"}, "name": {"required": False, "type": "str"}, "offset": {"required": False, "type": "int"}, "reputation": {"required": False, "type": "int"}, "sld_id": {"required": False, "type": "int"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_firewall(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_firewall(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
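# --- Hedged sketch (illustrative only) -----------------------------------------
# Demonstrates how the helpers above shape the payload sent to FortiGate:
# keys not in option_list and None values are dropped, then underscores in the
# remaining keys are rewritten as hyphens. The sample dict is hypothetical.
def _example_payload():
    sample = {
        'database': 'isdb',
        'direction': 'src',
        'sld_id': None,        # dropped: value is None
        'unknown_key': 1,      # dropped: not in option_list
    }
    # -> {'database': 'isdb', 'direction': 'src'}
    return underscore_to_hyphen(filter_firewall_internet_service_data(sample))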
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""
.. _tutorial-use-pass-instrument:

How to Use TVM Pass Instrument
==============================
**Author**: `Chi-Wei Wang <https://github.com/chiwwang>`_

As more and more passes are implemented, it becomes useful to instrument
pass execution, analyze per-pass effects, and observe various events.

We can instrument passes by providing a list of
:py:class:`tvm.ir.instrument.PassInstrument` instances to
:py:class:`tvm.transform.PassContext`. We provide a pass instrument for
collecting timing information (:py:class:`tvm.ir.instrument.PassTimingInstrument`),
but an extension mechanism is available via the
:py:func:`tvm.instrument.pass_instrument` decorator.

This tutorial demonstrates how developers can use ``PassContext`` to instrument
passes. Please also refer to the :ref:`pass-infra`.
"""
import tvm
import tvm.relay as relay
from tvm.relay.testing import resnet
from tvm.contrib.download import download_testdata
from tvm.relay.build_module import bind_params_by_name
from tvm.ir.instrument import (
    PassTimingInstrument,
    pass_instrument,
)

###############################################################################
# Create An Example Relay Program
# -------------------------------
# We use the pre-defined resnet-18 network in Relay.
batch_size = 1
num_of_image_class = 1000
image_shape = (3, 224, 224)
output_shape = (batch_size, num_of_image_class)
relay_mod, relay_params = resnet.get_workload(num_layers=18, batch_size=1, image_shape=image_shape)
print("Printing the IR module...")
print(relay_mod.astext(show_meta_data=False))

###############################################################################
# Create PassContext With Instruments
# -----------------------------------
# To run all passes with an instrument, pass it via the ``instruments`` argument to
# the ``PassContext`` constructor. A built-in ``PassTimingInstrument`` is used to
# profile the execution time of each pass.
timing_inst = PassTimingInstrument()
with tvm.transform.PassContext(instruments=[timing_inst]):
    relay_mod = relay.transform.InferType()(relay_mod)
    relay_mod = relay.transform.FoldScaleAxis()(relay_mod)
    # Before exiting the context, get the profile results.
    profiles = timing_inst.render()
print("Printing results of timing profile...")
print(profiles)

###############################################################################
# Use Current PassContext With Instruments
# ----------------------------------------
# One can also use the current ``PassContext`` and register
# ``PassInstrument`` instances via the ``override_instruments`` method.
# Note that ``override_instruments`` executes the ``exit_pass_ctx`` method
# if any instrument already exists. Then it switches to the new instruments
# and calls the ``enter_pass_ctx`` method of the new instruments.
# Refer to the following sections and :py:func:`tvm.instrument.pass_instrument` for these methods.
cur_pass_ctx = tvm.transform.PassContext.current()
cur_pass_ctx.override_instruments([timing_inst])
relay_mod = relay.transform.InferType()(relay_mod)
relay_mod = relay.transform.FoldScaleAxis()(relay_mod)
profiles = timing_inst.render()
print("Printing results of timing profile...")
print(profiles)

###############################################################################
# Register an empty list to clear existing instruments.
#
# Note that ``exit_pass_ctx`` of ``PassTimingInstrument`` is called.
# Profiles are cleared so nothing is printed.
cur_pass_ctx.override_instruments([])
# Uncomment the call to .render() to see a warning like:
#   Warning: no passes have been profiled, did you enable pass profiling?
# profiles = timing_inst.render()

###############################################################################
# Create Customized Instrument Class
# ----------------------------------
# A customized instrument class can be created using the
# :py:func:`tvm.instrument.pass_instrument` decorator.
#
# Let's create an instrument class which calculates the change in the number of
# occurrences of each operator caused by each pass. We can look at ``op.name`` to
# find the name of each operator, and we do this before and after each pass to
# calculate the difference.


@pass_instrument
class RelayCallNodeDiffer:
    def __init__(self):
        self._op_diff = []
        # Passes can be nested.
        # Use a stack to make sure we get correct before/after pairs.
        self._op_cnt_before_stack = []

    def enter_pass_ctx(self):
        self._op_diff = []
        self._op_cnt_before_stack = []

    def exit_pass_ctx(self):
        assert len(self._op_cnt_before_stack) == 0, "The stack is not empty. Something is wrong."

    def run_before_pass(self, mod, info):
        self._op_cnt_before_stack.append((info.name, self._count_nodes(mod)))

    def run_after_pass(self, mod, info):
        # Pop out the latest recorded pass.
        name_before, op_to_cnt_before = self._op_cnt_before_stack.pop()
        assert name_before == info.name, "name_before: {}, info.name: {} doesn't match".format(
            name_before, info.name
        )
        cur_depth = len(self._op_cnt_before_stack)
        op_to_cnt_after = self._count_nodes(mod)
        op_diff = self._diff(op_to_cnt_after, op_to_cnt_before)
        # Only record passes causing differences.
        if op_diff:
            self._op_diff.append((cur_depth, info.name, op_diff))

    def get_pass_to_op_diff(self):
        """
        return [
          (depth, pass_name, {op_name: diff_num, ...}), ...
        ]
        """
        return self._op_diff

    @staticmethod
    def _count_nodes(mod):
        """Count the number of occurrences of each operator in the module"""
        ret = {}

        def visit(node):
            if isinstance(node, relay.expr.Call):
                if hasattr(node.op, "name"):
                    op_name = node.op.name
                else:
                    # Some CallNodes may not have a 'name', such as relay.Function.
                    return
                ret[op_name] = ret.get(op_name, 0) + 1

        relay.analysis.post_order_visit(mod["main"], visit)
        return ret

    @staticmethod
    def _diff(d_after, d_before):
        """Calculate the difference of two dictionaries along their keys.
        The result is values in d_after minus values in d_before.
        """
        ret = {}
        key_after, key_before = set(d_after), set(d_before)
        for k in key_before & key_after:
            tmp = d_after[k] - d_before[k]
            if tmp:
                ret[k] = d_after[k] - d_before[k]
        for k in key_after - key_before:
            ret[k] = d_after[k]
        for k in key_before - key_after:
            ret[k] = -d_before[k]
        return ret


###############################################################################
# Apply Passes and Multiple Instrument Classes
# --------------------------------------------
# We can use multiple instrument classes in a ``PassContext``.
# However, it should be noted that instrument methods are executed sequentially,
# obeying the order of the ``instruments`` argument.
# So for an instrument class like ``PassTimingInstrument``, the execution time of
# the other instrument classes is inevitably counted in the final profile result.
call_node_inst = RelayCallNodeDiffer()
desired_layouts = {
    "nn.conv2d": ["NHWC", "HWIO"],
}
pass_seq = tvm.transform.Sequential(
    [
        relay.transform.FoldConstant(),
        relay.transform.ConvertLayout(desired_layouts),
        relay.transform.FoldConstant(),
    ]
)
relay_mod["main"] = bind_params_by_name(relay_mod["main"], relay_params)
# timing_inst is put after call_node_inst.
# So the execution time of ``call_node_inst.run_after_pass()`` is also counted.
with tvm.transform.PassContext(opt_level=3, instruments=[call_node_inst, timing_inst]):
    relay_mod = pass_seq(relay_mod)
    profiles = timing_inst.render()
# Uncomment the next line to see timing-profile results.
# print(profiles)

###############################################################################
# We can see how many CallNodes of each op type increase or decrease per pass.
from pprint import pprint

print("Printing the change in number of occurrences of each operator caused by each pass...")
pprint(call_node_inst.get_pass_to_op_diff())

###############################################################################
# Exception Handling
# ------------------
# Let's see what happens if an exception occurs in a method of a ``PassInstrument``.
#
# Define ``PassInstrument`` classes which raise exceptions when entering/exiting
# ``PassContext``:
class PassExampleBase:
    def __init__(self, name):
        self._name = name

    def enter_pass_ctx(self):
        print(self._name, "enter_pass_ctx")

    def exit_pass_ctx(self):
        print(self._name, "exit_pass_ctx")

    def should_run(self, mod, info):
        print(self._name, "should_run")
        return True

    def run_before_pass(self, mod, pass_info):
        print(self._name, "run_before_pass")

    def run_after_pass(self, mod, pass_info):
        print(self._name, "run_after_pass")


@pass_instrument
class PassFine(PassExampleBase):
    pass


@pass_instrument
class PassBadEnterCtx(PassExampleBase):
    def enter_pass_ctx(self):
        print(self._name, "bad enter_pass_ctx!!!")
        raise ValueError("{} bad enter_pass_ctx".format(self._name))


@pass_instrument
class PassBadExitCtx(PassExampleBase):
    def exit_pass_ctx(self):
        print(self._name, "bad exit_pass_ctx!!!")
        raise ValueError("{} bad exit_pass_ctx".format(self._name))


###############################################################################
# If an exception occurs in ``enter_pass_ctx``, ``PassContext`` will disable the
# pass instrumentation, and it will run the ``exit_pass_ctx`` of each
# ``PassInstrument`` which successfully finished ``enter_pass_ctx``.
#
# In the following example, we can see that ``exit_pass_ctx`` of `PassFine_0`
# is executed after the exception.
demo_ctx = tvm.transform.PassContext(
    instruments=[
        PassFine("PassFine_0"),
        PassBadEnterCtx("PassBadEnterCtx"),
        PassFine("PassFine_1"),
    ]
)
try:
    with demo_ctx:
        relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
    print("Catching", str(ex).split("\n")[-1])

###############################################################################
# Exceptions in ``PassInstrument`` instances cause all instruments of the current
# ``PassContext`` to be cleared, so nothing is printed when ``override_instruments``
# is called.
demo_ctx.override_instruments([])  # no PassFine_0 exit_pass_ctx printed, etc.

###############################################################################
# If an exception occurs in ``exit_pass_ctx``, the pass instrument is disabled
# and then the exception is propagated. That means ``PassInstrument`` instances
# registered after the one throwing the exception do not execute ``exit_pass_ctx``.
demo_ctx = tvm.transform.PassContext(
    instruments=[
        PassFine("PassFine_0"),
        PassBadExitCtx("PassBadExitCtx"),
        PassFine("PassFine_1"),
    ]
)
try:
    # PassFine_1 executes enter_pass_ctx, but not exit_pass_ctx.
    with demo_ctx:
        relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
    print("Catching", str(ex).split("\n")[-1])

###############################################################################
# Exceptions that occur in ``should_run``, ``run_before_pass``, and
# ``run_after_pass`` are not handled explicitly -- we rely on the context manager
# (the ``with`` syntax) to exit ``PassContext`` safely.
#
# We use ``run_before_pass`` as an example:
@pass_instrument
class PassBadRunBefore(PassExampleBase):
    def run_before_pass(self, mod, pass_info):
        print(self._name, "bad run_before_pass!!!")
        raise ValueError("{} bad run_before_pass".format(self._name))


demo_ctx = tvm.transform.PassContext(
    instruments=[
        PassFine("PassFine_0"),
        PassBadRunBefore("PassBadRunBefore"),
        PassFine("PassFine_1"),
    ]
)
try:
    # All exit_pass_ctx methods are called.
    with demo_ctx:
        relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
    print("Catching", str(ex).split("\n")[-1])

###############################################################################
# Also note that pass instrumentation is not disabled in this case. So if we call
# ``override_instruments``, the ``exit_pass_ctx`` of the previously registered
# ``PassInstrument`` instances is called.
demo_ctx.override_instruments([])

###############################################################################
# If we don't wrap pass execution with the ``with`` syntax, ``exit_pass_ctx``
# is not called. Let's try this with the current ``PassContext``:
cur_pass_ctx = tvm.transform.PassContext.current()
cur_pass_ctx.override_instruments(
    [
        PassFine("PassFine_0"),
        PassBadRunBefore("PassBadRunBefore"),
        PassFine("PassFine_1"),
    ]
)

###############################################################################
# Then call passes. ``exit_pass_ctx`` is not executed after the exception,
# as expected.
try:
    # No ``exit_pass_ctx`` gets executed.
    relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
    print("Catching", str(ex).split("\n")[-1])

###############################################################################
# Clear instruments.
cur_pass_ctx.override_instruments([])
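
###############################################################################
# As a closing note, ``PassExampleBase`` above defines a ``should_run`` method
# but never exercises it beyond printing. The sketch below is not part of the
# original tutorial; it illustrates, under the assumption that returning
# ``False`` from ``should_run`` asks the pass infrastructure to skip that pass,
# how an instrument can veto individual passes. The pass name checked here
# ("FoldScaleAxis") is only an illustrative choice.


@pass_instrument
class SkipNamedPass:
    """Veto any pass whose name matches ``skip_name``."""

    def __init__(self, skip_name):
        self._skip_name = skip_name

    def should_run(self, mod, info):
        # Returning False requests that this particular pass not be run.
        return info.name != self._skip_name


with tvm.transform.PassContext(instruments=[SkipNamedPass("FoldScaleAxis")]):
    relay_mod = relay.transform.InferType()(relay_mod)
    # FoldScaleAxis is requested here but vetoed by SkipNamedPass.should_run.
    relay_mod = relay.transform.FoldScaleAxis()(relay_mod)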
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import contextlib
import six

from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export


# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'


@keras_export('keras.mixed_precision.experimental.Policy', v1=[])
class Policy(object):
  """A dtype policy for a Keras layer.

  A dtype policy determines dtype-related aspects of a layer, such as its
  computation and variable dtypes. Each layer has a policy. Policies can be
  passed to the `dtype` argument of layer constructors, or a global policy can
  be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
  default to the global policy if no policy is passed to its constructor.

  For many models, each layer's policy will have the same compute dtype and
  variable dtype, which will typically be float32. In this case, we refer to
  the singular dtype as the layer's dtype, which can be queried by the property
  `tf.keras.layers.Layer.dtype`.

  When mixed precision training is used, most layers will instead have a
  float16 or bfloat16 compute dtype and a float32 variable dtype, and so the
  layer does not have a single dtype. When the variable dtype does not match
  the compute dtype, variables will be automatically casted to the compute
  dtype to avoid type errors. In this case,
  `tf.keras.layers.Layer.dtype` refers to the variable dtype, not the compute
  dtype. See [the mixed precision guide](
  https://www.tensorflow.org/guide/keras/mixed_precision) for more information
  on how to use mixed precision.

  Certain policies also have a `tf.mixed_precision.experimental.LossScale`
  instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
  scaling is a technique used with mixed precision to avoid numerical underflow
  in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
  `Model.train_on_batch`, and similar methods. Layers which are not Models
  ignore the loss scale.

  Policies are constructed by passing a string to the constructor, e.g.
  `tf.keras.mixed_precision.experimental.Policy('float32')`. The string
  determines the compute and variable dtypes. It can be one of the following:

  * Any dtype name, such as 'float32' or 'float64'. Both the variable and
    compute dtypes will be that dtype. No loss scaling is done by default.
  * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
    bfloat16, while the variable dtype is float32. These policies are used for
    mixed precision training. With 'mixed_float16', a dynamic loss scale is
    used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
    scaling is unnecessary with bfloat16.

  ### How to use mixed precision in a Keras model

  To use mixed precision in a Keras model, the `'mixed_float16'` or
  `'mixed_bfloat16'` policy can be used.
  `tf.keras.mixed_precision.experimental.set_policy` can be used to set the
  default policy for layers if no policy is passed to them. For example:

  >>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
  >>> model = tf.keras.models.Sequential([
  ...     tf.keras.layers.Input((100,)),
  ...     # Dense layers use global policy of 'mixed_float16', which does
  ...     # computations in float16 while keeping variables in float32.
  ...     tf.keras.layers.Dense(10),
  ...     tf.keras.layers.Dense(10),
  ...     # Softmax should be done in float32 for numeric stability. We pass
  ...     # dtype='float32' to use float32 instead of the global policy.
  ...     tf.keras.layers.Activation('softmax', dtype='float32')
  ... ])

  Alternatively, the policy can be passed to individual layers instead of
  setting the global policy with `set_policy`:

  >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
  >>> model = tf.keras.models.Sequential([
  ...     tf.keras.layers.Input((100,)),
  ...     tf.keras.layers.Dense(10, dtype=policy),
  ...     tf.keras.layers.Dense(10, dtype=policy),
  ...     # Softmax should be done in float32 for numeric stability.
  ...     tf.keras.layers.Activation('softmax', dtype='float32')
  ... ])

  Note the `'mixed_float16'` policy will apply loss scaling by default in
  `Model.fit`, `Model.train_on_batch`, and other training methods. If no such
  method is used (e.g., a custom training loop is used) and `'mixed_float16'`
  is used, the loss scale must be manually applied. See
  `tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
  `'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to
  be manually applied.

  See [the mixed precision guide](
  https://www.tensorflow.org/guide/keras/mixed_precision) for more information
  on using mixed precision.

  ### How to use float64 in a Keras model

  Using float64 is similar to mixed precision. Either the global policy can be
  set to float64, or `dtype='float64'` can be passed to individual layers. For
  example, to set the global policy:

  >>> tf.keras.mixed_precision.experimental.set_policy('float64')
  >>> model = tf.keras.models.Sequential([
  ...     tf.keras.layers.Input((100,)),
  ...     # All layers use global policy of 'float64', which does computations
  ...     # and creates variables in float64.
  ...     tf.keras.layers.Dense(10),
  ...     tf.keras.layers.Dense(10),
  ...     tf.keras.layers.Activation('softmax')
  ... ])
  >>> # Optionally set policy back to float32 if any other models use float32
  >>> tf.keras.mixed_precision.experimental.set_policy('float32')

  ### How a layer uses its policy's compute dtype

  A layer will cast its inputs to its compute dtype in TensorFlow 2. For
  example:

  >>> x = tf.ones((4, 4, 4, 4), dtype='float64')
  >>> # `layer`'s policy defaults to float32.
  >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  >>> # `layer` casts its inputs to its compute dtype, which is float32, and
  >>> # does computations in float32.
  >>> y = layer(x)
  >>> y.dtype
  tf.float32

  Note that the base `tf.keras.layers.Layer` class inserts the casts. If
  subclassing your own layer, you do not have to insert any casts.

  Currently, only tensors in the first argument to the layer's `call` method
  are casted. For example:

  >>> class MyLayer(tf.keras.layers.Layer):
  ...   # Bug! `b` will not be casted.
  ...   def call(self, a, b):
  ...     return a + 1., b + 1.
  >>> a = tf.constant(1., dtype="float32")
  >>> b = tf.constant(1., dtype="float32")
  >>> layer = MyLayer(dtype="float64")
  >>> x, y = layer(a, b)
  >>> x.dtype
  tf.float64
  >>> y.dtype
  tf.float32

  If writing your own layer, it is recommended to accept tensors only in the
  first argument. This way, all tensors are casted to the layer's compute
  dtype. `MyLayer` should therefore be written as:

  >>> class MyLayer(tf.keras.layers.Layer):
  ...   # Now, all tensor inputs will be casted.
  ...   def call(self, inputs):
  ...     a, b = inputs
  ...     return a + 1., b + 1.
  >>> a = tf.constant(1., dtype="float32")
  >>> b = tf.constant(1., dtype="float32")
  >>> layer = MyLayer(dtype="float64")
  >>> x, y = layer((a, b))
  >>> x.dtype
  tf.float64
  >>> y.dtype
  tf.float64

  Other arguments are not automatically casted for technical reasons, but this
  may change in a future minor release.

  A layer subclass can prevent its inputs from being autocasted by passing
  `autocast=False` to the layer constructor. For example:

  >>> class NonAutoCastingLayer(tf.keras.layers.Layer):
  ...   def __init__(self, **kwargs):
  ...     kwargs['autocast'] = False
  ...     super(NonAutoCastingLayer, self).__init__(**kwargs)
  ...   def call(self, inp):
  ...     return inp
  >>> x = tf.ones((4, 4, 4, 4), dtype='float32')
  >>> layer = NonAutoCastingLayer(dtype='float64')
  >>> y = layer(x)  # Will not cast inputs to its compute dtype of float64
  >>> y.dtype
  tf.float32

  ### How a layer uses its policy's variable dtype

  The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
  is the layer's policy's variable dtype.

  If a layer's compute and variable dtypes differ, `add_weight` will wrap
  floating-point variables with a special wrapper called an `AutoCastVariable`.
  This wrapper is identical to the original variable except it casts itself to
  the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`,
  the variable is not casted.

  A layer author can prevent a variable from being wrapped with an
  `AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`:

  >>> class MyLayer(tf.keras.layers.Layer):
  ...   def build(self, input_shape):
  ...     self.x = self.add_weight('x')
  ...     self.y = self.add_weight('y', experimental_autocast=False)
  >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
  >>> layer = MyLayer(dtype=policy)
  >>> layer.build((2, 2))
  >>> layer.x
  <AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, numpy=...>
  >>> layer.y
  <tf.Variable 'y:0' shape=() dtype=float32, numpy=...>

  Passing `experimental_autocast=False` is useful for layers which may
  internally do some math in the variable dtype instead of the compute dtype.
  For example, you may wish to compute variable statistics, such as mean and
  variance, in the variable dtype.

  ### How to write a layer that supports mixed precision and float64.

  For the most part, layers will automatically support mixed precision and
  float64 without any additional work, due to the fact that the base layer
  automatically casts inputs, creates variables of the correct type, and in the
  case of mixed precision, wraps variables with `AutoCastVariables`.

  For example, this simple dense layer does not require any additional work to
  support mixed precision or float64. Keras automatically casts the inputs and
  variables to the appropriate dtype.

  >>> class MyDense(tf.keras.layers.Layer):
  ...   def build(self, input_shape):
  ...     self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
  ...   def call(self, inputs):
  ...     return tf.matmul(inputs, self.kernel)
  >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
  >>> layer = MyDense(dtype=policy)
  >>> x = np.random.rand(10, 10)
  >>> y = layer(x)
  >>> y.dtype
  tf.float16

  The primary case where you need extra work to support mixed precision or
  float64 is when you create a new tensor, such as with `tf.ones` or
  `tf.constant`. In such cases, you must create the tensor of the correct
  dtype. For example, suppose you modify the `MyDense` layer to add a random
  number to the output using `tf.random.normal`. You must pass the input dtype
  to `tf.random.normal` to ensure the dtypes match.

  >>> class MyDense(tf.keras.layers.Layer):
  ...   def build(self, input_shape):
  ...     self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
  ...   def call(self, inputs):
  ...     rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
  ...     return tf.matmul(inputs, self.kernel) + rand
  >>>
  >>> layer = MyDense(dtype=policy)
  >>> y = layer(x)
  >>> y.dtype
  tf.float16

  If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError`
  would have occurred. This is because the dtype defaults to `"float32"`, so
  the layer would only work if the inputs were float32.
  """

  def __init__(self, name, loss_scale=USE_DEFAULT):
    """Constructs the policy.

    The `name` argument determines the compute and variable dtypes and the
    default loss scale; it has no additional effect on the Policy. The compute
    and variable dtypes can only be specified through `name`, and cannot be
    specified directly.

    Args:
      name: A string. Can be one of the following values:
        * Any dtype name, such as 'float32' or 'float64'. Both the variable
          and compute dtypes will be that dtype.
        * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
          bfloat16, while the variable dtype is float32. With 'mixed_float16',
          a dynamic loss scale is used. These policies are used for mixed
          precision training.
      loss_scale: A `tf.mixed_precision.experimental.LossScale`, an int (which
        uses a `FixedLossScale`), or the string "dynamic" (which uses a
        `DynamicLossScale`). Defaults to using no loss scaling unless `name`
        is "mixed_float16", in which case this defaults to "dynamic". Only
        `tf.keras.Model`s, not layers, use the loss scale, and it is only used
        during `Model.fit`, `Model.train_on_batch`, and other similar methods.
    """
    if isinstance(name, dtypes.DType):
      raise TypeError("'name' must be a string, not a DType. "
                      "Instead, pass DType.name. Got: %s" % (name.name,))
    elif not isinstance(name, six.string_types):
      raise TypeError("'name' must be a string, but got: %s" % (name,))
    self._name = name
    self._compute_dtype, self._variable_dtype = self._parse_name(name)

    if loss_scale == USE_DEFAULT:
      loss_scale = 'dynamic' if name == 'mixed_float16' else None
      self._using_default_loss_scale = True
    else:
      self._using_default_loss_scale = False
    if loss_scale and self._compute_dtype not in (None, 'float16'):
      tf_logging.warn('Creating a Policy with a loss scale is only useful for '
                      'float16 policies. You passed loss_scale=%r for policy '
                      '%s. Consider not passing any loss_scale instead.'
                      % (loss_scale, name))
    self._loss_scale = keras_loss_scale_module.get(loss_scale)

    if name in ('mixed_float16', 'mixed_bfloat16'):
      device_compatibility_check.log_device_compatibility_check(name,
                                                                skip_local=True)

  def _parse_name(self, name):
    """Parses a Policy name into a compute and variable dtype.

    Args:
      name: The name of the policy.

    Returns:
      The (compute_dtype, variable_dtype) pair.
    """
    if name.endswith('_float32_vars'):
      error_msg = ('Policies ending in \'_float32_vars\' have been removed '
                   'from TensorFlow.')
      if name in ('infer_float32_vars', 'infer_with_float32_vars'):
        error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
                      'policy instead.')
      elif name == 'float16_with_float32_vars':
        error_msg += (' Please use the \'mixed_float16\' policy instead.')
      elif name == 'bfloat16_with_float32_vars':
        error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
      error_msg += ' Got policy name: \'%s\'' % name
      raise ValueError(error_msg)

    if name == 'mixed_float16':
      return 'float16', 'float32'
    elif name == 'mixed_bfloat16':
      return 'bfloat16', 'float32'
    elif name == '_infer':
      # The "_infer" policy exists only for compatibility with TF 1, where
      # "_infer" is the default. The behavior matches the behavior of TF 1
      # before policies were introduced. With "_infer", the computation and
      # variable dtype are inferred from the first input the first time the
      # layer is called. Once the layer is called for the first time, the
      # layer's policy will change to the dtype of the first input, and it will
      # no longer have the "_infer" policy.
      #
      # The infer policy should be considered an implementation detail and may
      # be removed in the future.
      return None, None

    try:
      dtype = dtypes.as_dtype(name).name
    except TypeError:
      error = ("Cannot convert value %s to a mixed precision Policy. "
               "Valid policies include 'mixed_float16', "
               "'mixed_bfloat16', and the name of any dtype such as "
               "'float32'." % (name,))
      # six.raise_from suppresses the original TypeError from being raised
      six.raise_from(ValueError(error), None)
    return dtype, dtype

  @property
  def variable_dtype(self):
    """The variable dtype of this policy.

    This is the dtype layers will create their variables in, unless a layer
    explicitly chooses a different dtype. If this is different from
    `Policy.compute_dtype`, Layers will cast variables to the compute dtype to
    avoid type errors.

    Returns:
      The variable dtype of this policy.
    """
    return self._variable_dtype

  @property
  def compute_dtype(self):
    """The compute dtype of this policy.

    This is the dtype layers will do their computations in.

    Note that even if the compute dtype is float16 or bfloat16, hardware
    devices may not do individual adds, multiplies, and other fundamental
    operations in [b]float16, but instead may do some of them in float32 for
    numeric stability. The compute dtype is the dtype of the inputs and outputs
    of the TensorFlow ops that the layer executes. Internally, many TensorFlow
    ops will do certain internal calculations in float32, or some other
    device-internal intermediate format with higher precision than [b]float16,
    to increase numeric stability.

    For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
    float16 compute dtype, will pass float16 inputs to tf.matmul. But tf.matmul
    will use float32 intermediate math. The performance benefit of float16 is
    still apparent, due to increased memory bandwidth and the fact that modern
    GPUs have specialized hardware for computing matmuls on float16 while still
    keeping intermediate computations in float32.

    Returns:
      The compute dtype of this policy.
    """
    return self._compute_dtype

  @property
  def should_cast_variables(self):
    """Returns True if variables should be casted.

    This is true if the variable dtype is not the same as the compute dtype.

    Returns:
      True, if variables should be casted.
    """
    return self.variable_dtype != self.compute_dtype

  @property
  def loss_scale(self):
    """Returns the loss scale of this Policy.

    Returns:
      A `tf.mixed_precision.experimental.LossScale`, or None.
    """
    return self._loss_scale

  @property
  def name(self):
    """Returns the name of this policy."""
    return self._name

  def __repr__(self):
    return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale)

  def get_config(self):
    config = {
        'name': self.name
    }
    if not self._using_default_loss_scale:
      # We only include the loss scale if the default loss scale is not used.
      # This allows us to change the loss scale config format without breaking
      # users who use the default loss scale.
      config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale)
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    if 'loss_scale' in config and isinstance(config['loss_scale'], dict):
      config = config.copy()
      config['loss_scale'] = keras_loss_scale_module.deserialize(
          config['loss_scale'], custom_objects=custom_objects)
    return cls(**config)


# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "_infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None


@keras_export('keras.mixed_precision.experimental.global_policy', v1=[])
def global_policy():
  """Returns the global Policy.

  The global policy is the default policy used for layers, if no policy is
  passed to the layer constructor. If no policy has been set with
  `keras.mixed_precision.experimental.set_policy`, this will return a policy
  constructed from `tf.keras.backend.floatx()` (floatx defaults to float32).

  If TensorFlow 2 behavior has been disabled with
  `tf.compat.v1.disable_v2_behavior()`, this will instead return a special
  "_infer" policy which infers the dtype from the dtype of the first input the
  first time the layer is called. This behavior matches the behavior that
  existed in TensorFlow 1.

  See `tf.keras.mixed_precision.experimental.Policy` for more information on
  policies.

  Returns:
    The global Policy.
  """
  if _global_policy is None:
    if base_layer_utils.v2_dtype_behavior_enabled():
      return Policy(backend.floatx())
    else:
      return Policy('_infer')
  return _global_policy


def policy_defaults_to_floatx():
  """Returns True if `global_policy()` will use the current value of floatx."""
  return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled()


def _check_if_mixed_precision_graph_rewrite_is_enabled(policy):
  if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
    raise ValueError(
        'The global dtype policy cannot be set to "{policy.name}", because the '
        'mixed precision graph rewrite has already been enabled.\n'
        'At most, one of the following can be called:\n\n'
        '  1. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
        '(You called this first)\n'
        '  2. tf.keras.mixed_precision.experimental.set_policy() with a mixed '
        'precision policy (You called this second)\n\n'
        'You called both functions, which is an error, because both functions '
        'enable you to use mixed precision. If in doubt which function to use, '
        'use the second, as it supports Eager execution and is more '
        'customizable.'.format(policy=policy))


@keras_export('keras.mixed_precision.experimental.set_policy', v1=[])
def set_policy(policy):
  """Sets the global Policy.

  The global policy is the default policy used for layers, if no policy is
  passed to the layer constructor. If no global policy is set, layers will
  instead default to a Policy constructed from `tf.keras.backend.floatx()`.

  See `keras.mixed_precision.experimental.Policy` for more information.

  Args:
    policy: A Policy, or a string that will be converted to a Policy.
  """
  global _global_policy
  if not base_layer_utils.v2_dtype_behavior_enabled():
    raise ValueError('The global policy can only be set in TensorFlow 2')
  if policy is not None and not isinstance(policy, Policy):
    policy = Policy(policy)
  is_mixed_policy = policy is not None and policy.should_cast_variables
  if is_mixed_policy:
    _check_if_mixed_precision_graph_rewrite_is_enabled(policy)
  _global_policy = policy
  mixed_precision_global_state.using_mixed_precision_policy = is_mixed_policy


# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
  """A context manager that sets the global Policy under it.

  Args:
    policy: A Policy, or a string that will be converted to a Policy.

  Yields:
    Nothing.
  """
  old_policy = _global_policy
  try:
    set_policy(policy)
    yield
  finally:
    set_policy(old_policy)


def _is_convertible_to_dtype(dtype):
  try:
    dtypes.as_dtype(dtype)
    return True
  except TypeError:
    return False


def _policy_equivalent_to_dtype(policy):
  """Returns True if the Policy is equivalent to a single dtype.

  A policy is equivalent to a single dtype if the policy's compute and
  variable dtypes are the same and the policy does not cause the layer/model
  to have additional behavior, such as loss scaling.

  The "_infer" policy is considered equivalent to a single dtype.

  Args:
    policy: A Policy.

  Returns:
    True, if the policy is equivalent to a single dtype.
  """
  # We use type() instead of isinstance because a subclass of Policy is never
  # equivalent to a dtype.
  return (type(policy) == Policy and  # pylint: disable=unidiomatic-typecheck
          list(policy.get_config().keys()) == ['name'] and
          (policy.name == '_infer' or _is_convertible_to_dtype(policy.name)))


def serialize(policy):
  if _policy_equivalent_to_dtype(policy):
    # We return either None or the policy name for compatibility with older
    # versions of Keras. If the policy name is returned, it is a dtype string
    # such as 'float32'.
    return None if policy.name == '_infer' else policy.name
  return generic_utils.serialize_keras_object(policy)


def deserialize(config, custom_objects=None):
  if isinstance(config, str) and _is_convertible_to_dtype(config):
    return Policy(config)
  if config is None:
    return Policy('_infer')
  module_objects = {'Policy': Policy}
  return generic_utils.deserialize_keras_object(
      config,
      module_objects=module_objects,
      custom_objects=custom_objects,
      printable_module_name='dtype policy')
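

# A usage sketch (not part of the original module) tying the pieces above
# together. The expected values follow directly from _parse_name and the
# serialization helpers defined in this file; the round-trip through
# generic_utils is assumed to reconstruct an equivalent Policy.
#
# >>> p = Policy('mixed_float16')
# >>> p.compute_dtype, p.variable_dtype
# ('float16', 'float32')
# >>> p.should_cast_variables
# True
# >>> deserialize(serialize(p)).name
# 'mixed_float16'
# >>> serialize(Policy('float32'))  # dtype-equivalent policies serialize to a plain string
# 'float32'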
"""Common operations on Posix pathnames. Instead of importing this module directly, import os and refer to this module as os.path. The "os.path" name is an alias for this module on Posix systems; on other systems (e.g. Mac, Windows), os.path provides the same operations in a manner specific to that platform, and is an alias to another module (e.g. macpath, ntpath). Some of this can actually be useful on non-Posix systems too, e.g. for manipulation of the pathname component of URLs. """ import os import sys import stat import genericpath from genericpath import * __all__ = ["normcase","isabs","join","splitdrive","split","splitext", "basename","dirname","commonprefix","getsize","getmtime", "getatime","getctime","islink","exists","lexists","isdir","isfile", "ismount", "expanduser","expandvars","normpath","abspath", "samefile","sameopenfile","samestat", "curdir","pardir","sep","pathsep","defpath","altsep","extsep", "devnull","realpath","supports_unicode_filenames","relpath"] # Strings representing various path-related bits and pieces. # These are primarily for export; internally, they are hardcoded. curdir = '.' pardir = '..' extsep = '.' sep = '/' pathsep = ':' defpath = ':/bin:/usr/bin' altsep = None devnull = '/dev/null' def _get_sep(path): if isinstance(path, bytes): return b'/' else: return '/' # Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. # On MS-DOS this may also turn slashes into backslashes; however, other # normalizations (such as optimizing '../' away) are not allowed # (another function should be defined to do that). def normcase(s): """Normalize case of pathname. Has no effect under Posix""" # TODO: on Mac OS X, this should really return s.lower(). return s # Return whether a path is absolute. # Trivial in Posix, harder on the Mac or MS-DOS. def isabs(s): """Test whether a path is absolute""" sep = _get_sep(s) return s.startswith(sep) # Join pathnames. # Ignore the previous parts if a part is absolute. # Insert a '/' unless the first part is empty or already ends in '/'. def join(a, *p): """Join two or more pathname components, inserting '/' as needed. If any component is an absolute path, all previous path components will be discarded.""" sep = _get_sep(a) path = a for b in p: if b.startswith(sep): path = b elif not path or path.endswith(sep): path += b else: path += sep + b return path # Split a path in head (everything up to the last '/') and tail (the # rest). If the path ends in '/', tail will be empty. If there is no # '/' in the path, head will be empty. # Trailing '/'es are stripped from head unless it is the root. def split(p): """Split a pathname. Returns tuple "(head, tail)" where "tail" is everything after the final slash. Either part may be empty.""" sep = _get_sep(p) i = p.rfind(sep) + 1 head, tail = p[:i], p[i:] if head and head != sep*len(head): head = head.rstrip(sep) return head, tail # Split a path in root and extension. # The extension is everything starting at the last dot in the last # pathname component; the root is everything before that. # It is always true that root + ext == p. def splitext(p): if isinstance(p, bytes): sep = b'/' extsep = b'.' else: sep = '/' extsep = '.' return genericpath._splitext(p, sep, None, extsep) splitext.__doc__ = genericpath._splitext.__doc__ # Split a pathname into a drive specification and the rest of the # path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. def splitdrive(p): """Split a pathname into drive and path. 
On Posix, drive is always empty.""" return p[:0], p # Return the tail (basename) part of a path, same as split(path)[1]. def basename(p): """Returns the final component of a pathname""" sep = _get_sep(p) i = p.rfind(sep) + 1 return p[i:] # Return the head (dirname) part of a path, same as split(path)[0]. def dirname(p): """Returns the directory component of a pathname""" sep = _get_sep(p) i = p.rfind(sep) + 1 head = p[:i] if head and head != sep*len(head): head = head.rstrip(sep) return head # Is a path a symbolic link? # This will always return false on systems where os.lstat doesn't exist. def islink(path): """Test whether a path is a symbolic link""" try: st = os.lstat(path) except (os.error, AttributeError): return False return stat.S_ISLNK(st.st_mode) # Being true for dangling symbolic links is also useful. def lexists(path): """Test whether a path exists. Returns True for broken symbolic links""" try: st = os.lstat(path) except os.error: return False return True # Are two filenames really pointing to the same file? def samefile(f1, f2): """Test whether two pathnames reference the same actual file""" s1 = os.stat(f1) s2 = os.stat(f2) return samestat(s1, s2) # Are two open files really referencing the same file? # (Not necessarily the same file descriptor!) def sameopenfile(fp1, fp2): """Test whether two open file objects reference the same file""" s1 = os.fstat(fp1) s2 = os.fstat(fp2) return samestat(s1, s2) # Are two stat buffers (obtained from stat, fstat or lstat) # describing the same file? def samestat(s1, s2): """Test whether two stat buffers reference the same file""" return s1.st_ino == s2.st_ino and \ s1.st_dev == s2.st_dev # Is a path a mount point? # (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) def ismount(path): """Test whether a path is a mount point""" try: s1 = os.lstat(path) if isinstance(path, bytes): parent = join(path, b'..') else: parent = join(path, '..') s2 = os.lstat(parent) except os.error: return False # It doesn't exist -- so not a mount point :-) dev1 = s1.st_dev dev2 = s2.st_dev if dev1 != dev2: return True # path/.. on a different device as path ino1 = s1.st_ino ino2 = s2.st_ino if ino1 == ino2: return True # path/.. is the same i-node as path return False # Expand paths beginning with '~' or '~user'. # '~' means $HOME; '~user' means that user's home directory. # If the path doesn't begin with '~', or if the user or $HOME is unknown, # the path is returned unchanged (leaving error reporting to whatever # function is called with the expanded path as argument). # See also module 'glob' for expansion of *, ? and [...] in pathnames. # (A function should also be defined to do full *sh-style environment # variable expansion.) def expanduser(path): """Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.""" if isinstance(path, bytes): tilde = b'~' else: tilde = '~' if not path.startswith(tilde): return path sep = _get_sep(path) i = path.find(sep, 1) if i < 0: i = len(path) if i == 1: if 'HOME' not in os.environ: import pwd userhome = pwd.getpwuid(os.getuid()).pw_dir else: userhome = os.environ['HOME'] else: import pwd name = path[1:i] if isinstance(name, bytes): name = str(name, 'ASCII') try: pwent = pwd.getpwnam(name) except KeyError: return path userhome = pwent.pw_dir if isinstance(path, bytes): userhome = userhome.encode(sys.getfilesystemencoding()) userhome = userhome.rstrip(sep) return userhome + path[i:] # Expand paths containing shell variable substitutions. 
# This expands the forms $variable and ${variable} only. # Non-existent variables are left unchanged. _varprog = None _varprogb = None def expandvars(path): """Expand shell variables of form $var and ${var}. Unknown variables are left unchanged.""" global _varprog, _varprogb if isinstance(path, bytes): if b'$' not in path: return path if not _varprogb: import re _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII) search = _varprogb.search start = b'{' end = b'}' else: if '$' not in path: return path if not _varprog: import re _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII) search = _varprog.search start = '{' end = '}' i = 0 while True: m = search(path, i) if not m: break i, j = m.span(0) name = m.group(1) if name.startswith(start) and name.endswith(end): name = name[1:-1] if isinstance(name, bytes): name = str(name, 'ASCII') if name in os.environ: tail = path[j:] value = os.environ[name] if isinstance(path, bytes): value = value.encode('ASCII') path = path[:i] + value i = len(path) path += tail else: i = j return path # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. # It should be understood that this may change the meaning of the path # if it contains symbolic links! def normpath(path): """Normalize path, eliminating double slashes, etc.""" if isinstance(path, bytes): sep = b'/' empty = b'' dot = b'.' dotdot = b'..' else: sep = '/' empty = '' dot = '.' dotdot = '..' if path == empty: return dot initial_slashes = path.startswith(sep) # POSIX allows one or two initial slashes, but treats three or more # as single slash. if (initial_slashes and path.startswith(sep*2) and not path.startswith(sep*3)): initial_slashes = 2 comps = path.split(sep) new_comps = [] for comp in comps: if comp in (empty, dot): continue if (comp != dotdot or (not initial_slashes and not new_comps) or (new_comps and new_comps[-1] == dotdot)): new_comps.append(comp) elif new_comps: new_comps.pop() comps = new_comps path = sep.join(comps) if initial_slashes: path = sep*initial_slashes + path return path or dot def abspath(path): """Return an absolute path.""" if not isabs(path): if isinstance(path, bytes): cwd = os.getcwdb() else: cwd = os.getcwd() path = join(cwd, path) return normpath(path) # Return a canonical path (i.e. the absolute location of a file on the # filesystem). def realpath(filename): """Return the canonical path of the specified filename, eliminating any symbolic links encountered in the path.""" if isinstance(filename, bytes): sep = b'/' empty = b'' else: sep = '/' empty = '' if isabs(filename): bits = [sep] + filename.split(sep)[1:] else: bits = [empty] + filename.split(sep) for i in range(2, len(bits)+1): component = join(*bits[0:i]) # Resolve symbolic links. if islink(component): resolved = _resolve_link(component) if resolved is None: # Infinite loop -- return original component + rest of the path return abspath(join(*([component] + bits[i:]))) else: newpath = join(*([resolved] + bits[i:])) return realpath(newpath) return abspath(filename) def _resolve_link(path): """Internal helper function. Takes a path and follows symlinks until we either arrive at something that isn't a symlink, or encounter a path we've seen before (meaning that there's a loop). 
""" paths_seen = [] while islink(path): if path in paths_seen: # Already seen this path, so we must have a symlink loop return None paths_seen.append(path) # Resolve where the link points to resolved = os.readlink(path) if not isabs(resolved): dir = dirname(path) path = normpath(join(dir, resolved)) else: path = normpath(resolved) return path supports_unicode_filenames = False def relpath(path, start=None): """Return a relative version of a path""" if not path: raise ValueError("no path specified") if isinstance(path, bytes): curdir = b'.' sep = b'/' pardir = b'..' else: curdir = '.' sep = '/' pardir = '..' if start is None: start = curdir start_list = abspath(start).split(sep) path_list = abspath(path).split(sep) # Work out how much of the filepath is shared by start and path. i = len(commonprefix([start_list, path_list])) rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list)
# Author: Vikram Melapudi aka makechaos (makechaos [at] gmail [dot] com) # Updated: 07 Jan 2014 # Started: 25 Dec 2013 # Summary: Backend of python server to process the user input and link with the MongoDB database import photoServer import web import StringIO import string from PIL import Image from pymongo import MongoClient hostid='http://192.168.1.4:1451' client = MongoClient('localhost',1951) dbName = 'PhotoDB-6' db = client[dbName] coll = db['Photos'] gopt = '' showSizeText = False dfmt = '%d-%b-%Y' albumList = None imPerPage = 50 ## ******************************************************************************************** ## ALL HTML PAGE CODES ## ******************************************************************************************** def initHTML(): txt = '<html><head><meta name="viewport" content="width=device-width, user-scalable=no">\n' txt+= '<style type="text/css">\n' txt+= '* {font-family: arial; border:0px; font-size:12px;}\n' txt+= 'txt {font-weight: bold;}\n' txt+= 'button {background-color:#774477; color:white; margin:5px;}\n' txt+= '.bodybtn {background-color:lightgray; color:black; }\n' txt+= '.greenbtn {cursor: pointer; float:left; display:block; margin:5px; padding:5px; border-radius: 5px; background-color:lightgreen;}\n' txt+= '.graybtn {cursor: pointer; float:left; display:block; margin:5px; padding:5px; border-radius: 5px; background-color:lightgray;}\n' txt+= '.bluebtn {cursor: pointer; float:left; display:block; margin:5px; padding:5px; border-radius: 5px; background-color:lightblue;}\n' txt+= '.sel {margin:5px; float:left; width:160px; background-color:lightblue; position:relative;}\n' txt+= '.nosel {margin:5px; float:left; width:160px; background-color:white; position:relative;}\n' txt+= '</style></head>\n' return txt def homeBanner(src): txt = '<div align="center">' txt += '<table style="width:100%; background-color:#663366;"><tr>\n' #txt +='<td><button onclick=bylist("Albums")>By Albums</button></td>' #txt +='<td><button onclick=bylist("Tags")>By Tags</button></td>' #txt +='<td><button onclick=bylist("Date")>By Date</button></td>' #txt +='<td><button onclick=bylist("Model")>By Model</button></td>' txt += '<td><div style="margin:10px;"><a href="'+hostid+'/home" style="font-size:20px; color:white;">PhotoAccess v2.0</a></td>\n' txt += '<td><div style="font-size:14px; color:white;">Show <select id="opt" style="font-size:14px; border:0px; background-color:#663366; color:white;"> <option value="public">Public</option> <option value="all">All</option> <option value="private">Private</option> <option value="NoAlbum">No Albums</option> <option value="notag">No Tags</option> </select></div></a>\n' txt +='</tr>\n' txt +='</table></div><hr>\n' return txt def albumsDropdown(): global albumList txt ='<select id="albums" style="background-color:#996699; color:#050505; border:0px;">\n' txt +='<option value="create-new" style="font-weight:bold;">New Album</option>\n' if albumList==None: albumList = getEntries('Albums') for m in albumList: dval = m[0] if len(dval)>15: dval = dval[:13]+'..' 
txt += '<option value="%s" style="color:blue;" >%s (%d)</option>\n'%(m[0],dval,m[1]) txt +='</select>\n' return txt def imageBanner(): txt = '<div align="center">\n' txt +='<table style="width:100%; background-color:#663366;"><tr>\n' txt += '<td><div style="margin:10px;"><a href="'+hostid+'/home" style="font-size:20px; color:white;">PhotoAccess v2.0</a></div></td>\n' txt += '<td><button onclick="selall()">Toggle Sel.</button>\n' txt +=albumsDropdown() txt +='<button margin=10px onclick="addAlbum()" id="album">To Album</button>\n' txt +='<button margin=10px onclick="addTags()" id="tags">Tag-em</button>\n' txt +='<button margin=10px onclick="setPrivate()">As Private</button></td>\n' txt +='</tr>' txt +='</table></div><hr>' return txt def imageBannerAlbums(): txt = '<div align="center">\n' txt +='<table style="width:100%; background-color:#663366;"><tr>\n' txt += '<td><div style="margin:10px;"><a href="'+hostid+'/home" style="font-size:20px; color:white;">PhotoAccess v2.0</a></div></td>\n' txt += '<td><button onclick="selall()">Toggle Sel.</button>\n' txt +=albumsDropdown() txt +='<button margin=10px onclick="addAlbum()" id="album">To Album</button>\n' txt +='<button margin=10px onclick="rmAlbum()" id="album">Remove-em</button>\n' txt +='<button margin=10px onclick="addTags()" id="rmAlbum">Tag-em</button>\n' txt +='<button margin=10px onclick="setPrivate()">As Private</button></td>\n' txt +='</tr>\n' txt +='</table></div><hr>\n' return txt def imageBannerTags(): txt = '<div align="center">\n' txt +='<table style="width:100%; background-color:#663366;"><tr>\n' txt += '<td><div style="margin:10px;"><a href="'+hostid+'/home" style="font-size:20px; color:white;">PhotoAccess v2.0</a></div></td>\n' txt += '<td><button onclick="selall()">Toggle Sel.</button>\n' txt +=albumsDropdown() txt +='<button margin=10px onclick="addAlbum()" id="album">To Album</button>\n' txt +='<button margin=10px onclick="rmTag()" id="rmTag">Remove-em</button>\n' txt +='<button margin=10px onclick="addTags()" id="tags">Tag-em</button>\n' txt +='<button margin=10px onclick="setPrivate()">As Private</button></td>\n' txt +='</tr>\n' txt +='</table></div><hr>\n' return txt def addJS(src='',sel=''): txt = '<head><script>' txt += 'var nsel=0; var gsel="'+sel+'"; var hostid="'+hostid+'"; var gopt="/opt-'+gopt+'"; var query="'+src.replace('/',':')+'";\n' txt += 'function imselect0(id){el=document.getElementById(id); if(el.getAttribute("sel")=="0") { nsel++; gsel+=","+id; var stxt=el.style.cssText; stxt=stxt.substring(0, stxt.indexOf("background-color")); el.style.cssText =stxt+"background-color:lightblue;"; el.setAttribute("sel","1");} else { nsel--; el.setAttribute("sel","0"); var stxt=el.style.cssText; stxt=stxt.substring(0, stxt.indexOf("background-color")); el.style.cssText = stxt +"background-color:white;"; var sel=gsel+","; sel=sel.replace(","+id+",",","); gsel=sel.substring(0,sel.length-1);} }\n' txt += 'function imselect(id){el=document.getElementById("d"+id); if (el==null) { rmSel(id); return; } if(el.getAttribute("sel")=="0") { nsel++; gsel+=","+id; el.setAttribute("class","sel"); el.setAttribute("sel","1");} else { nsel--; el.setAttribute("sel","0"); el.setAttribute("class","nosel"); rmSel(id); }}\n' txt += 'function rmSel(id) { var sel=gsel+","; sel=sel.replace(","+id+",",","); gsel=sel.substring(0,sel.length-1); }\n' #txt += 'function resize(){location.assign("'+location.host+'/size=256");}'; #txt += 'function nameChange() {txt=document.getElementById("name").value+"/"+gsel; 
document.getElementById("album").setAttribute("href",hostid+"/album/"+txt); document.getElementById("tags").setAttribute("href",hostid+"/tags/"+txt); alert(hostid+"/album/"+txt);}' txt += 'function validName() { if(document.getElementById("name").value.length<1) { alert("No name provided!"); return flase;} else return true; }\n' txt += 'function option() { var el=document.getElementById("opt"); return "/opt-"+el.options[el.selectedIndex].text; }\n' txt += 'function bylist(val) { window.location.href=hostid+option()+"/list/"+val; }\n' txt += 'function selNext(val) { window.location.href=hostid+"/"+val+"/sel="+gsel; }\n' txt += 'function selPrev(val) { window.location.href=hostid+"/"+val+"/sel="+gsel; }\n' txt += 'function showSelect() { window.location.href=hostid+"/select/"+query+gsel; }\n' txt += 'function addAlbum() {var el=document.getElementById("albums"); var nm=el.options[el.selectedIndex].value; if(nm.indexOf("create-new")==0) nm=prompt("Enter new album name:"); postPage(hostid+gopt+"/album/"+nm+"/"+query+gsel, false); }\n' txt += 'function postPage(url, reload) { /*window.location.href = url; */ var txt=httpGET(url); if(reload) location.reload(); showInfo( txt ); var ss=gsel.split(","); for(var m=1;m<ss.length;m++) { imselect(ss[m]); } gsel=""; }\n' txt += 'function impage() {var el=document.getElementById("impage"); var pg=el.options[el.selectedIndex].value; window.location.href=hostid+gopt+"/"+pg+"/sel="+gsel}\n' txt += 'function setPrivate() { postPage(hostid+gopt+"/private/"+query+gsel, false); }\n' txt += 'function httpGET(url) { var xmlHttp=null; xmlHttp=new XMLHttpRequest(); xmlHttp.open("GET", url, false); xmlHttp.send(null); return xmlHttp.responseText; }\n' txt += 'function rmAlbum() { var ss=gsel.split(","); for(var m=1;m<ss.length;m++) {el=document.getElementById(ss[m]); el.style.cssText+="visibility:hidden;"; } postPage(hostid+gopt+"/rmAlbum/"+query+gsel, true); }\n' txt += 'function rmTag() { var ss=gsel.split(","); for(var m=1;m<ss.length;m++) {el=document.getElementById(ss[m]); el.style.cssText+="visibility:hidden;"; } postPage(hostid+gopt+"/rmTag/"+query+gsel, true); }\n' txt += 'function addTags() { var tg=prompt("Enter the tags (separated by comma):"); postPage(hostid+gopt+"/tags/"+tg+"/"+query+gsel, false); }\n' txt += 'function iminfo(e,msg) { return; var el=document.getElementById("info"); el.innerHTML=msg; el.style.cssText="font-size:14px; background-color:#FFFF99; width:100%; color:#404040; visibility:visible; position:absolute; left:0px; top:0px;"; }\n' txt += 'function showInfo(msg) { var el=document.getElementById("info"); el.innerHTML=msg; el.style.cssText="font-size:11px; background-color:#FFFF99; width:100%; color:#404040; visibility:visible; position:absolute; left:0px; top:0px;"; }\n' txt += 'function selall() { var els=document.getElementsByTagName("img"); for (var el=0; el<els.length; el++){ imselect(els[el].getAttribute("id")); } }\n' txt += 'function hideinfo() { var el=document.getElementById("info"); el.innerHTML=""; el.style.cssText="visibility:hidden;"; }\n' txt += '</script></head>' return txt def errorPage(err): txt = initHTML()+addJS()+'<body>' txt += homeBanner(""); txt += '<div align="center" style="font-size:20px;">'+err+'</div' txt += '</body></html>' fd = open('err.html','w') fd.write(txt) fd.close() return txt def homeMessage(msg): return msg; txt = initHTML()+addJS()+'<body>'+homeBanner("") txt += '<div align="center" style="font-size:20px">'+msg+'</div>' txt += '</body></html>' return txt ## 
******************************************************************************************** ## ******************************************************************************************** ## PYMONGO (database inquiry routines) ## ******************************************************************************************** def fullQuery(qry): import string # add the option query to mongodb query if gopt=='No Albums': qry['Albums'] = [] elif gopt=='No Tags': qry['Tags'] = [] elif gopt=='Public': qry['Class'] = 'Public' elif gopt=='Private': qry['Class'] = 'Private' return qry def getSelectedImages(inp,listby='Albums'): imgs = inp.split(',') typ = imgs[0].split(':') sim = [] if len(typ)==0: return sim dbqry = getQuery(typ[0], typ[1]) allim = coll.find(dbqry).sort('Date') for m in range(1,len(imgs)): nn = int(imgs[m][4:len(imgs[m])]) fil = allim[nn]["File"] alb = allim[nn][listby] sim = sim + [[fil, alb]] return sim def setClass(cls,inp): sim = getSelectedImages(inp) for m in sim: coll.update({"File":m[0]}, {"$set" : {"Class":cls}}) return homeMessage("Set Private class for %d images."%len(sim)) def rmAlbum(inp): import datetime sim = getSelectedImages(inp) ss = inp.split(',')[0].split(':') n = 0 qalb = '' for m in ss: if m=='Albums': qalb = ss[n+1] break n = n + 1 for m in sim: alb = [] for n in m[1]: if not(n==qalb): alb += [n]; coll.update( {'File':m[0]}, { '$set' : {'Albums':alb}} ) ctime = datetime.datetime.now() coll.update({'AlbumEdit':qalb},{'$set': {'EditLog':'remove %d images'%(len(sim)), 'EditTime':ctime } }) return homeMessage('Removed %d images from album %s'%(len(sim),qalb)) def rmTag(inp): import datetime sim = getSelectedImages(inp,'Tags') ss = inp.split(',')[0].split(':') n = 0 qtag = '' for m in ss: if m=='Tags': qtag = ss[n+1] break n = n + 1 for m in sim: tag = [] for n in m[1]: if not(n==qtag): tag += [n]; coll.update( {'File':m[0]}, { '$set' : {'Tags':tag}} ) return homeMessage('Untagged %d images from tag %s'%(len(sim),qtag)) def addAlbum(name,inp): import datetime sim = getSelectedImages(inp) ctime = datetime.datetime.now() aa = coll.find({'AlbumEdit':name}) msg = '%d images'%(len(sim)) if aa.count()==0: keyval = dict() keyval['AlbumEdit'] = name keyval['EditTime'] = ctime keyval['EditLog'] = 'New album with '+msg coll.insert( keyval ) else: coll.update({'AlbumEdit': name}, {'$set': {'EditTime':ctime, 'EditLog':'added '+msg }}) fd = open('Albums/'+name+'.abm','a') for m in sim: doupdate = False alb = [] if type(m[1]) is list: if name not in m[1]: alb = m[1] + [name] doupdate = True elif len(m[1])>0: # if no album was ever set or set as string alb = [m[1]] doupdate = True if doupdate: coll.update({"File" : m[0]}, {"$set" : { "Albums" : alb } }) fd.write(m[0]+'\n') fd.close() return homeMessage('Added %d images to album %s</div>'%(len(sim),name)) def addTags(tags,inp): sim = getSelectedImages(inp) taglst = tags.split(',') for m in sim: tgs = coll.find({'File':m[0]})[0]['Tags'] if type(tgs) is list: for n in taglst: if n.strip() not in tgs: tgs += [n.strip()] coll.update({'File':m[0]}, {'$set':{'Tags':tgs}}) return homeMessage('Added tag(s) (%s) to %d images</div>'%(tags,len(sim))) def getDistinctLists(key): from bson.code import Code mapper = Code(" function() { if('"+key+"' in this) { this."+key+".forEach( function(z){emit(z,1);} ) } } " ) reducer= Code(" function(key,val) { var tot=0; for(var i=0;i<val.length;i++)tot+=val[i]; return tot;} ") res = coll.map_reduce(mapper, reducer, "album") return res.find() def getDistinctEntries(key): from bson.code import Code mapper = 
Code(" function() { if('"+key+"' in this) { emit(this."+key+",1); } } ") reducer= Code(" function(key,val) { var tot=0; for(var i=0;i<val.length;i++)tot+=val[i]; return tot;} ") res = coll.map_reduce(mapper, reducer, "album") return res.find() def getDistinctMonths(): from bson.code import Code mapper = Code(" function() { if('Date' in this) { emit(this.Date.toUTCString().substring(8,16),1); } } " ) reducer= Code(' function(key,val) { var tot=0; for(var i=0;i<val.length;i++)tot+=val[i]; return tot;} ') res = coll.map_reduce(mapper, reducer, "album") return res.find() def fmt(st): import datetime if type(st) is datetime.datetime: return st.strftime(dfmt) if type(st) is str: return st return st def getEntries(key): if key=="Albums" or key=="Tags": res = getDistinctLists(key) elif key=="Date": res = getDistinctMonths() elif key=="Date" or key=="Model" or key=="Folder": res = getDistinctEntries(key) else: return None lst = [] for m in res: lst += [ [fmt(m['_id']), int(m['value']) ]] return lst def serveImage(name,sz=128): try: img = Image.open(name) img.thumbnail((sz,sz),Image.ANTIALIAS) except: return name.replace("/","-") web.header('Content-type', 'image/PNG'); buf = StringIO.StringIO() img.save(buf,'PNG') return buf.getvalue() def getQuery(key, val): import datetime import calendar dbqry = dict() if key=='Date': dt1 = datetime.datetime.strptime(val,'%b %Y') dt2 = datetime.datetime(dt1.year, dt1.month, calendar.monthrange(dt1.year, dt1.month)[1]) dt2 = datetime.datetime(dt2.year, dt2.month, dt2.day, 23, 59, 59) dbqry["Date"] = { "$gte": dt1, "$lte": dt2 } else: dbqry[key] = val dbqry = fullQuery(dbqry) return dbqry ## ******************************************************************************************** ## ******************************************************************************************** ## Server pages ## ******************************************************************************************** def showSelection(inp): fd = open('tt.txt','w') fd.write(inp) fd.close() txt = initHTML()+addJS("") txt+= '<body><div align="center">' txt+= imageBanner(''); sim = getSelectedImages(inp) txt+= 'Selected %d images<br>'%len(sim) for m in sim: txt += '<img src="'+hostid+'/image=256'+m[0]+'"><br><br>\n' txt+= '</div></body></html>' return txt def homePage(): global albumList keys = ['Model','Albums','Folder','Date','Tags'] gorb = True txt = initHTML()+addJS()+'<body>' txt += homeBanner('') txt += '\n<table>' for key in keys: ents = getEntries(key) if key=="Albums": albumList = ents txt += '<tr width="100%"><td> <txt style="valign:bottom; font-size:16px; padding-top:10px;">'+key+'</txt></td></tr>\n' txt += '<tr width="100%"><td>\n' cls = 'bluebtn' if gorb: cls = 'greenbtn' gorb = not(gorb) if ents==None or len(ents)==0: txt +='<div class="graybtn"> No entries found !</div>' else: for m in ents: txt += '<div class="'+cls+'" onclick="bylist('+"'"+key+"/"+m[0]+"'"+')">' txt += '<div align="center"><txt>'+ m[0] + '</txt>('+ str(m[1]) +')</div></div>' txt += '<br></td></tr>\n' txt += '</table>\n</body></html>' return txt def listImages(npg,inp): import string import datetime import calendar from PIL import Image if len(inp)<1: return errorPage('No images to list!') sel = '' key = inp[0] val = inp[1] dbqry = getQuery(key, val) imgs = coll.find(dbqry).sort('Date') nimg = coll.find(dbqry).count() sz = "size=128" for m in range(2,len(inp)): if inp[m].find('size')>-1: sz =inp[m].split("=")[1] elif inp[m].find('sel')==0: sel = inp[m].split('=')[1] ssel = sel + ',' if nimg>0: txt = 

## ********************************************************************************************
## ********************************************************************************************
## Server pages
## ********************************************************************************************

def showSelection(inp):
    # keep a copy of the raw selection string for debugging
    fd = open('tt.txt', 'w')
    fd.write(inp)
    fd.close()
    txt = initHTML() + addJS("")
    txt += '<body><div align="center">'
    txt += imageBanner('')
    sim = getSelectedImages(inp)
    txt += 'Selected %d images<br>' % len(sim)
    for m in sim:
        txt += '<img src="' + hostid + '/image=256' + m[0] + '"><br><br>\n'
    txt += '</div></body></html>'
    return txt


def homePage():
    global albumList
    keys = ['Model', 'Albums', 'Folder', 'Date', 'Tags']
    gorb = True  # alternate green/blue buttons between key groups
    txt = initHTML() + addJS() + '<body>'
    txt += homeBanner('')
    txt += '\n<table>'
    for key in keys:
        ents = getEntries(key)
        if key == "Albums":
            albumList = ents
        txt += '<tr width="100%"><td> <txt style="valign:bottom; font-size:16px; padding-top:10px;">' + key + '</txt></td></tr>\n'
        txt += '<tr width="100%"><td>\n'
        cls = 'bluebtn'
        if gorb:
            cls = 'greenbtn'
        gorb = not gorb
        if ents is None or len(ents) == 0:
            txt += '<div class="graybtn"> No entries found !</div>'
        else:
            for m in ents:
                txt += '<div class="' + cls + '" onclick="bylist(' + "'" + key + "/" + m[0] + "'" + ')">'
                txt += '<div align="center"><txt>' + m[0] + '</txt>(' + str(m[1]) + ')</div></div>'
        txt += '<br></td></tr>\n'
    txt += '</table>\n</body></html>'
    return txt


def listImages(npg, inp):
    from PIL import Image

    if len(inp) < 1:
        return errorPage('No images to list!')
    sel = ''
    key = inp[0]
    val = inp[1]
    dbqry = getQuery(key, val)
    imgs = coll.find(dbqry).sort('Date')
    nimg = coll.find(dbqry).count()
    sz = "size=128"
    for m in range(2, len(inp)):
        if inp[m].find('size') > -1:
            sz = inp[m].split("=")[1]
        elif inp[m].find('sel') == 0:
            sel = inp[m].split('=')[1]
    ssel = sel + ','
    if nimg > 0:
        txt = initHTML() + addJS(key + ':' + fmt(val), sel) + '<body>'
        if key == 'Albums':
            txt += imageBannerAlbums()
        elif key == 'Tags':
            txt += imageBannerTags()
        else:
            txt += imageBanner()
        txt += '<div align="center" style="width:100%;"><div id="info" width="100%" style="color:gray; visibility:hidden; position:absolute;"></div>'
        # clamp the requested page range [ns, ne) to the available images
        ns = npg * imPerPage
        ne = ns + imPerPage
        if ns < 0:
            ns = 0
            ne = imPerPage
        if ns > nimg:
            ns = nimg
        if ne > nimg:
            ne = nimg
        opttxt = ''
        if len(gopt) > 0:
            opttxt = 'opt-' + gopt + '/'
        keyval = '/' + key + '/' + fmt(val)
        if npg > 0:
            txt += '<button class="bodybtn" id="prev" onclick="selPrev(' + "'" + opttxt + 'list-pg' + str(npg - 1) + keyval + "'" + ')">prev</button> ... '
        txt += 'showing '
        if nimg <= imPerPage:
            txt += str(ns + 1) + '-' + str(ne)
        else:
            # drop-down with one entry per page of images
            pg = 0
            nn = 1
            txt += '<select onchange="impage()" id="impage" style="border:0px; background-color:white;">'
            while nn <= nimg:
                en = min(nn + imPerPage - 1, nimg)
                sl = ''
                if pg == npg:
                    sl = 'selected'
                txt += '<option value="list-pg' + str(pg) + keyval + '" ' + sl + '>' + str(nn) + '-' + str(en) + '</option>\n'
                nn = nn + imPerPage
                pg = pg + 1
            txt += '</select>'
        txt += ' out of ' + str(nimg) + ' images in ' + key + "=" + fmt(val) + "."
        if ne < nimg:
            txt += ' ... <button class="bodybtn" id="next" onclick="selNext(' + "'" + opttxt + 'list-pg' + str(npg + 1) + keyval + "'" + ')">next</button>'
        txt += '</div><br><div align="center">'
        for n in range(ns, ne):
            m = imgs[n]
            fil = str(m["File"])
            imid = 'img-' + str(n)
            iminf = ''  # hover text, currently unused
            sztxt = ''
            if showSizeText:
                imsz = Image.open(fil).size
                sztxt = str(imsz[0]) + ' x ' + str(imsz[1]) + ' px'
            imlnk = hostid + '/image' + fil
            stxt = 'class="nosel" sel="0"'
            if ssel.find(',img-%d,' % n) > -1:
                stxt = 'class="sel" sel="1"'
            txt += '<div align="center" onmouseout="hideinfo()" onmouseover="iminfo(event,' + "'" + iminf + "'" + ')" onclick="imselect(' + "'" + imid + "'" + ')" id="d' + imid + '" ' + stxt + '>'
            txt += '<img id="' + imid + '" src="' + imlnk + '" alt="' + fil + '">'
            txt += '<div style="position:absolute; bottom:0px; left:0px; opacity:0.7; background-color:lightgray;">' + sztxt + '</div>'
            txt += '</div>\n'
        txt += '</div></body></html>'
        # save a copy of the generated page
        fd = open('img.html', 'w')
        fd.write(txt)
        fd.close()
        return txt
    else:
        txt = initHTML() + addJS()
        txt += '<body>' + homeBanner('')
        txt += '<div align="center" style="font-size:20px; font-weight:bold;"> No images found to show! </div></body>'
        return txt
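
# Pagination note (illustrative; imPerPage = 20 is an assumed value): with
# nimg = 45 matching images the <select> above emits three options,
#   'list-pg0/<key>/<val>' -> images 1-20
#   'list-pg1/<key>/<val>' -> images 21-40
#   'list-pg2/<key>/<val>' -> images 41-45
# and the prev/next buttons simply move npg by one in the same URL scheme.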
</div></body>' return txt def log(msg,mode='a'): fd = open('serverLog.txt',mode) fd.write(msg+'\n') fd.close() def savePage( pg ): fd = open('page.html','w') fd.write(pg); fd.close() def photoServe(name): import string import datetime global gopt global showSizeText inp = name.split("/"); log(datetime.datetime.now().strftime('%x %X')+' '+name) # get the options string out showSizeText = False iinp = [] for m in inp: if m.find('opt') == -1: iinp += [m] else: gopt = m.split('-')[1] if m.find('-showsize')>=0: showSizeText = True inp = iinp if name=="" or inp[0].find('home')==0: page = homePage() savePage( page ) return page if len(inp)<2: return errorPage('Unknown error '+name) elif inp[0].find("list")==0: tmp = inp[0].split('-') npg = 0 if len(tmp)>1: if tmp[1].find('pg')==0: npg = int(tmp[1][2:len(tmp[1])]) return listImages(npg, inp[1:len(inp)]) elif inp[0].find('private')==0: return setClass('Private',inp[1]) elif inp[0].find('rmAlbum')==0: return rmAlbum(inp[1]) elif inp[0].find('rmTag')==0: return rmTag(inp[1]) elif inp[0].find('syncdir')==0: import addDirImagesToDB import string if len(inp)>1: ddir = '' for m in range(1,len(inp)): ddir += '//'+inp[m] jfil = addDirImagesToDB.scanDirForPhotos(ddir) addDirImagesToDB.addFilesToDB(jfil) txt = '<html><body>'+homeBanner('')+'<h1>Log from photo-scanner program:</h1><br>' fd = open('.lastupdate_'+dbName,'r') for m in fd.readlines(): txt += m +'<br>\n' fd.close() txt+= '</body></html>' return txt elif inp[0].find('sync')==0: import pyflickr pyflickr.setLog() if len(inp)>2: pyflickr.setResize(inp[2].split(' ')) else: pyflickr.setResize(None) pyflickr.syncAlbum(inp[1],'vikrammelapudi') fd = open('pyflickr.log','r') txt = '<html><body>'+homeBanner('') txt += '<h1>Log from flickr-sync program:</h1><br>' for m in fd.readlines(): txt += m+'<br>\n' txt += '</body></html>' fd.close() return txt elif inp[0].find("image")==0: szin = inp[0].split("=") sz = 128 if len(szin)>1: sz = int(szin[1]) ff = '' for m in range(1,len(inp)): ff += '//'+inp[m] return serveImage(ff,sz) elif inp[0].find("album")==0: return addAlbum(inp[1],inp[2]) elif inp[0].find("tags")==0: return addTags(inp[1],inp[2]) else: return errorPage('Unknown request '+string.join(inp)+', '+name) return errorPage('Unknown error '+name)
#!/usr/bin/env python
"""Channel notifications support.

Classes and functions to support channel subscriptions and notifications
on those channels.

Notes:
  - This code is based on experimental APIs and is subject to change.
  - Notification does not do deduplication of notification ids, that's up to
    the receiver.
  - Storing the Channel between calls is up to the caller.


Example setting up a channel:

  # Create a new channel that gets notifications via webhook.
  channel = new_webhook_channel("https://example.com/my_web_hook")

  # Store the channel, keyed by 'channel.id'. Store it before calling the
  # watch method because notifications may start arriving before the watch
  # method returns.
  ...

  resp = service.objects().watchAll(
    bucket="some_bucket_id", body=channel.body()).execute()
  channel.update(resp)

  # Store the channel, keyed by 'channel.id'. Store it after being updated
  # since the resource_id value will now be correct, and that's needed to
  # stop a subscription.
  ...

An example Webhook implementation using webapp2. Note that webapp2 puts
headers in a case insensitive dictionary, as headers aren't guaranteed to
always be upper case.

  id = self.request.headers[X_GOOG_CHANNEL_ID]

  # Retrieve the channel by id.
  channel = ...

  # Parse notification from the headers, including validating the id.
  n = notification_from_headers(channel, self.request.headers)

  # Do app specific stuff with the notification here.
  if n.resource_state == 'sync':
    # Code to handle sync state.
  elif n.resource_state == 'exists':
    # Code to handle the exists state.
  elif n.resource_state == 'not_exists':
    # Code to handle the not exists state.

Example of unsubscribing.

  service.channels().stop(channel.body())
"""

import datetime
import uuid

from apiclient import errors
from oauth2client import util


# The unix time epoch starts at midnight 1970.
EPOCH = datetime.datetime.utcfromtimestamp(0)

# Map the names of the parameters in the JSON channel description to
# the parameter names we use in the Channel class.
CHANNEL_PARAMS = {
    'address': 'address',
    'id': 'id',
    'expiration': 'expiration',
    'params': 'params',
    'resourceId': 'resource_id',
    'resourceUri': 'resource_uri',
    'type': 'type',
    'token': 'token',
    }

X_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'
X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'
X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'
X_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'
X_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'


def _upper_header_keys(headers):
  new_headers = {}
  for k, v in headers.iteritems():
    new_headers[k.upper()] = v
  return new_headers


class Notification(object):
  """A Notification from a Channel.

  Notifications are not usually constructed directly, but are returned
  from functions like notification_from_headers().

  Attributes:
    message_number: int, The unique id number of this notification.
    state: str, The state of the resource being monitored.
    resource_uri: str, The address of the resource being monitored.
    resource_id: str, The unique identifier of the version of the resource at
      this event.
  """

  @util.positional(5)
  def __init__(self, message_number, state, resource_uri, resource_id):
    """Notification constructor.

    Args:
      message_number: int, The unique id number of this notification.
      state: str, The state of the resource being monitored. Can be one
        of "exists", "not_exists", or "sync".
      resource_uri: str, The address of the resource being monitored.
      resource_id: str, The identifier of the watched resource.
    """
    self.message_number = message_number
    self.state = state
    self.resource_uri = resource_uri
    self.resource_id = resource_id
""" self.message_number = message_number self.state = state self.resource_uri = resource_uri self.resource_id = resource_id class Channel(object): """A Channel for notifications. Usually not constructed directly, instead it is returned from helper functions like new_webhook_channel(). Attributes: type: str, The type of delivery mechanism used by this channel. For example, 'web_hook'. id: str, A UUID for the channel. token: str, An arbitrary string associated with the channel that is delivered to the target address with each event delivered over this channel. address: str, The address of the receiving entity where events are delivered. Specific to the channel type. expiration: int, The time, in milliseconds from the epoch, when this channel will expire. params: dict, A dictionary of string to string, with additional parameters controlling delivery channel behavior. resource_id: str, An opaque id that identifies the resource that is being watched. Stable across different API versions. resource_uri: str, The canonicalized ID of the watched resource. """ @util.positional(5) def __init__(self, type, id, token, address, expiration=None, params=None, resource_id="", resource_uri=""): """Create a new Channel. In user code, this Channel constructor will not typically be called manually since there are functions for creating channels for each specific type with a more customized set of arguments to pass. Args: type: str, The type of delivery mechanism used by this channel. For example, 'web_hook'. id: str, A UUID for the channel. token: str, An arbitrary string associated with the channel that is delivered to the target address with each event delivered over this channel. address: str, The address of the receiving entity where events are delivered. Specific to the channel type. expiration: int, The time, in milliseconds from the epoch, when this channel will expire. params: dict, A dictionary of string to string, with additional parameters controlling delivery channel behavior. resource_id: str, An opaque id that identifies the resource that is being watched. Stable across different API versions. resource_uri: str, The canonicalized ID of the watched resource. """ self.type = type self.id = id self.token = token self.address = address self.expiration = expiration self.params = params self.resource_id = resource_id self.resource_uri = resource_uri def body(self): """Build a body from the Channel. Constructs a dictionary that's appropriate for passing into watch() methods as the value of body argument. Returns: A dictionary representation of the channel. """ result = { 'id': self.id, 'token': self.token, 'type': self.type, 'address': self.address } if self.params: result['params'] = self.params if self.resource_id: result['resourceId'] = self.resource_id if self.resource_uri: result['resourceUri'] = self.resource_uri if self.expiration: result['expiration'] = self.expiration return result def update(self, resp): """Update a channel with information from the response of watch(). When a request is sent to watch() a resource, the response returned from the watch() request is a dictionary with updated channel information, such as the resource_id, which is needed when stopping a subscription. Args: resp: dict, The response from a watch() method. 
""" for json_name, param_name in CHANNEL_PARAMS.iteritems(): value = resp.get(json_name) if value is not None: setattr(self, param_name, value) def notification_from_headers(channel, headers): """Parse a notification from the webhook request headers, validate the notification, and return a Notification object. Args: channel: Channel, The channel that the notification is associated with. headers: dict, A dictionary like object that contains the request headers from the webhook HTTP request. Returns: A Notification object. Raises: errors.InvalidNotificationError if the notification is invalid. ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int. """ headers = _upper_header_keys(headers) channel_id = headers[X_GOOG_CHANNEL_ID] if channel.id != channel_id: raise errors.InvalidNotificationError( 'Channel id mismatch: %s != %s' % (channel.id, channel_id)) else: message_number = int(headers[X_GOOG_MESSAGE_NUMBER]) state = headers[X_GOOG_RESOURCE_STATE] resource_uri = headers[X_GOOG_RESOURCE_URI] resource_id = headers[X_GOOG_RESOURCE_ID] return Notification(message_number, state, resource_uri, resource_id) @util.positional(2) def new_webhook_channel(url, token=None, expiration=None, params=None): """Create a new webhook Channel. Args: url: str, URL to post notifications to. token: str, An arbitrary string associated with the channel that is delivered to the target address with each notification delivered over this channel. expiration: datetime.datetime, A time in the future when the channel should expire. Can also be None if the subscription should use the default expiration. Note that different services may have different limits on how long a subscription lasts. Check the response from the watch() method to see the value the service has set for an expiration time. params: dict, Extra parameters to pass on channel creation. Currently not used for webhook channels. """ expiration_ms = 0 if expiration: delta = expiration - EPOCH expiration_ms = delta.microseconds/1000 + ( delta.seconds + delta.days*24*3600)*1000 if expiration_ms < 0: expiration_ms = 0 return Channel('web_hook', str(uuid.uuid4()), token, url, expiration=expiration_ms, params=params)