| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For miscellaneous util methods used with compute."""
import copy
import string
import mock
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import periodic_task
from nova import rpc
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_notifier
from nova.tests import fake_server_actions
import nova.tests.image.fake
from nova.tests import matchers
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
class ComputeValidateDeviceTestCase(test.TestCase):
def setUp(self):
super(ComputeValidateDeviceTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
# check if test name includes "xen"
if 'xen' in self.id():
self.flags(compute_driver='xenapi.XenAPIDriver')
self.instance = {
'uuid': 'fake',
'root_device_name': None,
'instance_type_id': 'fake',
}
else:
self.instance = {
'uuid': 'fake',
'root_device_name': '/dev/vda',
'default_ephemeral_device': '/dev/vdb',
'instance_type_id': 'fake',
}
self.data = []
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
lambda context, instance, use_slave=False: self.data)
def _update_flavor(self, flavor_info):
self.flavor = {
'id': 1,
'name': 'foo',
'memory_mb': 128,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 1,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
}
self.flavor.update(flavor_info)
self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
'value': value}
for key, value in
self.flavor.items()]
def _validate_device(self, device=None):
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, self.instance['uuid'])
return compute_utils.get_device_name_for_instance(
self.context, self.instance, bdms, device)
@staticmethod
def _fake_bdm(device):
return fake_block_device.FakeDbBlockDeviceDict({
'source_type': 'volume',
'destination_type': 'volume',
'device_name': device,
'no_device': None,
'volume_id': 'fake',
'snapshot_id': None,
'guest_format': None
})
def test_wrap(self):
self.data = []
for letter in string.ascii_lowercase[2:]:
self.data.append(self._fake_bdm('/dev/vd' + letter))
device = self._validate_device()
self.assertEqual(device, '/dev/vdaa')
def test_wrap_plus_one(self):
self.data = []
for letter in string.ascii_lowercase[2:]:
self.data.append(self._fake_bdm('/dev/vd' + letter))
self.data.append(self._fake_bdm('/dev/vdaa'))
device = self._validate_device()
self.assertEqual(device, '/dev/vdab')
def test_later(self):
self.data = [
self._fake_bdm('/dev/vdc'),
self._fake_bdm('/dev/vdd'),
self._fake_bdm('/dev/vde'),
]
device = self._validate_device()
self.assertEqual(device, '/dev/vdf')
def test_gap(self):
self.data = [
self._fake_bdm('/dev/vdc'),
self._fake_bdm('/dev/vde'),
]
device = self._validate_device()
self.assertEqual(device, '/dev/vdd')
def test_no_bdms(self):
self.data = []
device = self._validate_device()
self.assertEqual(device, '/dev/vdc')
def test_lxc_names_work(self):
self.instance['root_device_name'] = '/dev/a'
self.instance['ephemeral_device_name'] = '/dev/b'
self.data = []
device = self._validate_device()
self.assertEqual(device, '/dev/c')
def test_name_conversion(self):
self.data = []
device = self._validate_device('/dev/c')
self.assertEqual(device, '/dev/vdc')
device = self._validate_device('/dev/sdc')
self.assertEqual(device, '/dev/vdc')
device = self._validate_device('/dev/xvdc')
self.assertEqual(device, '/dev/vdc')
def test_invalid_bdms(self):
self.instance['root_device_name'] = "baddata"
self.assertRaises(exception.InvalidDevicePath,
self._validate_device)
def test_invalid_device_prefix(self):
self.assertRaises(exception.InvalidDevicePath,
self._validate_device, '/baddata/vdc')
def test_device_in_use(self):
exc = self.assertRaises(exception.DevicePathInUse,
self._validate_device, '/dev/vda')
self.assertIn('/dev/vda', str(exc))
def test_swap(self):
self.instance['default_swap_device'] = "/dev/vdc"
device = self._validate_device()
self.assertEqual(device, '/dev/vdd')
def test_swap_no_ephemeral(self):
del self.instance['default_ephemeral_device']
self.instance['default_swap_device'] = "/dev/vdb"
device = self._validate_device()
self.assertEqual(device, '/dev/vdc')
def test_ephemeral_xenapi(self):
self._update_flavor({
'ephemeral_gb': 10,
'swap': 0,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdc')
def test_swap_xenapi(self):
self._update_flavor({
'ephemeral_gb': 0,
'swap': 10,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdb')
def test_swap_and_ephemeral_xenapi(self):
self._update_flavor({
'ephemeral_gb': 10,
'swap': 10,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdd')
def test_swap_and_one_attachment_xenapi(self):
self._update_flavor({
'ephemeral_gb': 0,
'swap': 10,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdb')
self.data.append(self._fake_bdm(device))
device = self._validate_device()
self.assertEqual(device, '/dev/xvdd')
class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase):
def setUp(self):
super(DefaultDeviceNamesForInstanceTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.ephemerals = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': None,
'boot_index': -1})])
self.swap = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdc',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1})])
self.block_device_mapping = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdd',
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1})])
self.flavor = {'swap': 4}
self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
self.is_libvirt = False
self.root_device_name = '/dev/vda'
self.update_called = False
def fake_extract_flavor(instance):
return self.flavor
def fake_driver_matches(driver_string):
if driver_string == 'libvirt.LibvirtDriver':
return self.is_libvirt
return False
self.patchers = []
self.patchers.append(
mock.patch.object(block_device_obj.BlockDeviceMapping, 'save'))
self.patchers.append(
mock.patch.object(
flavors, 'extract_flavor',
new=mock.Mock(side_effect=fake_extract_flavor)))
self.patchers.append(
mock.patch.object(driver,
'compute_driver_matches',
new=mock.Mock(
side_effect=fake_driver_matches)))
for patcher in self.patchers:
patcher.start()
def tearDown(self):
super(DefaultDeviceNamesForInstanceTestCase, self).tearDown()
for patcher in self.patchers:
patcher.stop()
def _test_default_device_names(self, *block_device_lists):
compute_utils.default_device_names_for_instance(self.instance,
self.root_device_name,
*block_device_lists)
def test_only_block_device_mapping(self):
# Test no-op
original_bdm = copy.deepcopy(self.block_device_mapping)
self._test_default_device_names([], [], self.block_device_mapping)
for original, new in zip(original_bdm, self.block_device_mapping):
self.assertEqual(original.device_name, new.device_name)
# Assert it defaults the missing one as expected
self.block_device_mapping[1]['device_name'] = None
self._test_default_device_names([], [], self.block_device_mapping)
self.assertEqual(self.block_device_mapping[1]['device_name'],
'/dev/vdb')
def test_with_ephemerals(self):
# Test ephemeral gets assigned
self.ephemerals[0]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.block_device_mapping[1]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual(self.block_device_mapping[1]['device_name'],
'/dev/vdc')
def test_with_swap(self):
# Test swap only
self.swap[0]['device_name'] = None
self._test_default_device_names([], self.swap, [])
self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
# Test swap and block_device_mapping
self.swap[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
self._test_default_device_names([], self.swap,
self.block_device_mapping)
self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
self.assertEqual(self.block_device_mapping[1]['device_name'],
'/dev/vdc')
def test_all_together(self):
# Test swap missing
self.swap[0]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
# Test swap and eph missing
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
# Test all missing
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
self.assertEqual(self.block_device_mapping[1]['device_name'],
'/dev/vdd')
class UsageInfoTestCase(test.TestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
super(UsageInfoTestCase, self).setUp()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.flags(use_local=True, group='conductor')
self.flags(compute_driver='nova.virt.fake.FakeDriver',
network_manager='nova.network.manager.FlatManager')
self.compute = importutils.import_object(CONF.compute_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(nova.tests.image.fake._FakeImageService,
'show', fake_show)
fake_network.set_stub_network_methods(self.stubs)
fake_server_actions.stub_out_action_events(self.stubs)
def _create_instance(self, params={}):
"""Create a test instance."""
flavor = flavors.get_flavor_by_name('m1.tiny')
sys_meta = flavors.save_flavor_info({}, flavor)
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = flavor['id']
inst['system_metadata'] = sys_meta
inst['ami_launch_index'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['info_cache'] = {'network_info': '[]'}
inst.update(params)
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
# Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id)
# Set some system metadata
sys_metadata = {'image_md_key1': 'val1',
'image_md_key2': 'val2',
'other_data': 'meow'}
instance.system_metadata.update(sys_metadata)
instance.save()
compute_utils.notify_usage_exists(
rpc.get_notifier('compute'), self.context, instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.exists')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending', 'image_meta'):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance, [], [])
def test_notify_usage_exists_deleted_instance(self):
# Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
# Set some system metadata
sys_metadata = {'image_md_key1': 'val1',
'image_md_key2': 'val2',
'other_data': 'meow'}
instance.system_metadata.update(sys_metadata)
instance.save()
self.compute.terminate_instance(self.context, instance, [], [])
instance = objects.Instance.get_by_id(
self.context.elevated(read_deleted='yes'), instance_id,
expected_attrs=['system_metadata'])
compute_utils.notify_usage_exists(
rpc.get_notifier('compute'), self.context, instance)
msg = fake_notifier.NOTIFICATIONS[-1]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.exists')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending', 'image_meta'):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
def test_notify_usage_exists_instance_not_found(self):
# Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.compute.terminate_instance(self.context, instance, [], [])
compute_utils.notify_usage_exists(
rpc.get_notifier('compute'), self.context, instance)
msg = fake_notifier.NOTIFICATIONS[-1]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.exists')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending', 'image_meta'):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'], {})
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
def test_notify_about_instance_usage(self):
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
# Set some system metadata
sys_metadata = {'image_md_key1': 'val1',
'image_md_key2': 'val2',
'other_data': 'meow'}
instance.system_metadata.update(sys_metadata)
instance.save()
extra_usage_info = {'image_name': 'fake_name'}
compute_utils.notify_about_instance_usage(
rpc.get_notifier('compute'),
self.context, instance, 'create.start',
extra_usage_info=extra_usage_info)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.create.start')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description', 'image_meta'):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
self.assertEqual(payload['image_name'], 'fake_name')
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance, [], [])
def test_notify_about_aggregate_update_with_id(self):
# Set aggregate payload
aggregate_payload = {'aggregate_id': 1}
compute_utils.notify_about_aggregate_update(self.context,
"create.end",
aggregate_payload)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'aggregate.create.end')
payload = msg.payload
self.assertEqual(payload['aggregate_id'], 1)
def test_notify_about_aggregate_update_with_name(self):
# Set aggregate payload
aggregate_payload = {'name': 'fakegroup'}
compute_utils.notify_about_aggregate_update(self.context,
"create.start",
aggregate_payload)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'aggregate.create.start')
payload = msg.payload
self.assertEqual(payload['name'], 'fakegroup')
def test_notify_about_aggregate_update_without_name_id(self):
# Set empty aggregate payload
aggregate_payload = {}
compute_utils.notify_about_aggregate_update(self.context,
"create.start",
aggregate_payload)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
class ComputeGetImageMetadataTestCase(test.TestCase):
def setUp(self):
super(ComputeGetImageMetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.image = {
"min_ram": 10,
"min_disk": 1,
"disk_format": "raw",
"container_format": "bare",
"properties": {},
}
self.image_service = nova.tests.image.fake._FakeImageService()
self.stubs.Set(self.image_service, 'show', self._fake_show)
self.ctx = context.RequestContext('fake', 'fake')
sys_meta = {
'image_min_ram': 10,
'image_min_disk': 1,
'image_disk_format': 'raw',
'image_container_format': 'bare',
'instance_type_id': 0,
'instance_type_name': 'm1.fake',
'instance_type_memory_mb': 10,
'instance_type_vcpus': 1,
'instance_type_root_gb': 1,
'instance_type_ephemeral_gb': 1,
'instance_type_flavorid': '0',
'instance_type_swap': 1,
'instance_type_rxtx_factor': 0.0,
'instance_type_vcpu_weight': None,
}
self.instance = fake_instance.fake_db_instance(
memory_mb=0, root_gb=0,
system_metadata=sys_meta)
@property
def instance_obj(self):
return objects.Instance._from_db_object(
self.ctx, objects.Instance(), self.instance,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
def _fake_show(self, ctx, image_id):
return self.image
def test_get_image_meta(self):
image_meta = compute_utils.get_image_metadata(
self.ctx, self.image_service, 'fake-image', self.instance_obj)
self.image['properties'] = 'DONTCARE'
self.assertThat(self.image, matchers.DictMatches(image_meta))
def test_get_image_meta_no_image(self):
def fake_show(ctx, image_id):
raise exception.ImageNotFound(image_id='fake-image')
self.stubs.Set(self.image_service, 'show', fake_show)
image_meta = compute_utils.get_image_metadata(
self.ctx, self.image_service, 'fake-image', self.instance_obj)
self.image['properties'] = 'DONTCARE'
# NOTE(danms): The trip through system_metadata will stringify things
for key in self.image:
self.image[key] = str(self.image[key])
self.assertThat(self.image, matchers.DictMatches(image_meta))
def test_get_image_meta_no_image_system_meta(self):
for k in self.instance['system_metadata'].keys():
if k.startswith('image_'):
del self.instance['system_metadata'][k]
image_meta = compute_utils.get_image_metadata(
self.ctx, self.image_service, 'fake-image', self.instance_obj)
self.image['properties'] = 'DONTCARE'
self.assertThat(self.image, matchers.DictMatches(image_meta))
def test_get_image_meta_no_image_no_image_system_meta(self):
def fake_show(ctx, image_id):
raise exception.ImageNotFound(image_id='fake-image')
self.stubs.Set(self.image_service, 'show', fake_show)
for k in self.instance['system_metadata'].keys():
if k.startswith('image_'):
del self.instance['system_metadata'][k]
image_meta = compute_utils.get_image_metadata(
self.ctx, self.image_service, 'fake-image', self.instance_obj)
expected = {'properties': 'DONTCARE'}
self.assertThat(expected, matchers.DictMatches(image_meta))
class ComputeUtilsGetNWInfo(test.TestCase):
def test_instance_object_none_info_cache(self):
inst = fake_instance.fake_instance_obj('fake-context',
expected_attrs=['info_cache'])
self.assertIsNone(inst.info_cache)
result = compute_utils.get_nw_info_for_instance(inst)
self.assertEqual(jsonutils.dumps([]), result.json())
def test_instance_dict_none_info_cache(self):
inst = fake_instance.fake_db_instance(info_cache=None)
self.assertIsNone(inst['info_cache'])
result = compute_utils.get_nw_info_for_instance(inst)
self.assertEqual(jsonutils.dumps([]), result.json())
class ComputeUtilsGetRebootTypes(test.TestCase):
def setUp(self):
super(ComputeUtilsGetRebootTypes, self).setUp()
self.context = context.RequestContext('fake', 'fake')
def test_get_reboot_type_started_soft(self):
reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED,
power_state.RUNNING)
self.assertEqual(reboot_type, 'SOFT')
def test_get_reboot_type_pending_soft(self):
reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING,
power_state.RUNNING)
self.assertEqual(reboot_type, 'SOFT')
def test_get_reboot_type_hard(self):
reboot_type = compute_utils.get_reboot_type('foo', power_state.RUNNING)
self.assertEqual(reboot_type, 'HARD')
def test_get_reboot_not_running_hard(self):
reboot_type = compute_utils.get_reboot_type('foo', 'bar')
self.assertEqual(reboot_type, 'HARD')
class ComputeUtilsPeriodicTaskSpacingWarning(test.NoDBTestCase):
@mock.patch.object(compute_utils, 'LOG')
def test_periodic_task_spacing_warning_no_op(self, mock_log):
@compute_utils.periodic_task_spacing_warn("config_value")
def not_a_periodic_task():
return "something"
self.assertEqual("something", not_a_periodic_task())
self.assertFalse(mock_log.warning.called)
self.assertFalse(mock_log.warn.called)
@mock.patch.object(compute_utils, 'LOG')
def test_periodic_task_spacing_warning_nonzero_spacing(self, mock_log):
@compute_utils.periodic_task_spacing_warn("config_value")
@periodic_task.periodic_task(spacing=10)
def a_periodic_task():
return "something"
self.assertEqual("something", a_periodic_task())
self.assertFalse(mock_log.warning.called)
self.assertFalse(mock_log.warn.called)
@mock.patch.object(compute_utils, 'LOG')
def test_periodic_task_spacing_warning_zero_spacing(self, mock_log):
@compute_utils.periodic_task_spacing_warn("config_value")
@periodic_task.periodic_task(spacing=0)
def zero_spacing_periodic_task():
return "something"
self.assertEqual("something", zero_spacing_periodic_task())
mock_log.warning.assert_called_with(mock.ANY, "config_value")
@mock.patch.object(compute_utils, 'LOG')
def test_periodic_task_spacing_warning_none_spacing(self, mock_log):
@compute_utils.periodic_task_spacing_warn("config_value")
@periodic_task.periodic_task(spacing=None)
def none_spacing_periodic_task():
return "something"
self.assertEqual("something", none_spacing_periodic_task())
mock_log.warning.assert_called_with(mock.ANY, "config_value")
@mock.patch.object(compute_utils, 'LOG')
def test_periodic_task_spacing_warning_default_spacing(self, mock_log):
@compute_utils.periodic_task_spacing_warn("config_value")
@periodic_task.periodic_task
def default_spacing_periodic_task():
return "something"
self.assertEqual("something", default_spacing_periodic_task())
mock_log.warning.assert_called_with(mock.ANY, "config_value")
| tanglei528/nova | nova/tests/compute/test_compute_utils.py | Python | apache-2.0 | 34,518 |
#!/usr/bin/env python
"""Output plugins implementations."""
from grr_response_server import output_plugin
# pylint: disable=unused-import,g-import-not-at-top
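# The bigquery_plugin import below is guarded because it presumably depends on
# optional BigQuery client libraries; when those are missing, the plugin is
# simply skipped while the remaining output plugins are still imported.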
try:
from grr_response_server.output_plugins import bigquery_plugin
except ImportError:
pass
from grr_response_server.output_plugins import csv_plugin
from grr_response_server.output_plugins import elasticsearch_plugin
from grr_response_server.output_plugins import email_plugin
from grr_response_server.output_plugins import splunk_plugin
from grr_response_server.output_plugins import sqlite_plugin
from grr_response_server.output_plugins import yaml_plugin
| google/grr | grr/server/grr_response_server/output_plugins/__init__.py | Python | apache-2.0 | 625 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration
Revision ID: 464e951dc3b8
Revises: None
Create Date: 2014-08-05 17:41:34.470183
"""
# revision identifiers, used by Alembic.
revision = '464e951dc3b8'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'states',
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('state', sa.BigInteger(), nullable=False),
sa.Column('s_metadata', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'modules_state',
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('state', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('name'))
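# Usage note (illustrative, not part of the original migration): since
# down_revision is None this is the base revision; with a plain Alembic
# setup it would typically be applied via `alembic upgrade head`.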
| stackforge/cloudkitty | cloudkitty/db/sqlalchemy/alembic/versions/464e951dc3b8_initial_migration.py | Python | apache-2.0 | 1,286 |
from read_data import read_from_file,read_nell_relations,get_relation_fname,get_node
from nltk.stem.snowball import SnowballStemmer
from nltk import word_tokenize,pos_tag
from predicate_extraction_set_matching import set_matching
import operator,sys
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from misc_func import remove_stopwords
from relations_dict import create_dict,create_dict_adva
'''
* python file which drives the whole program
*
* Copyright 2015 Vedsar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
if __name__=="__main__":
#usr_query=raw_input("Enter the query\n")
print "graph loading in memory"
#sub,obj=create_dict_adva()
#comment this uncomment upper line
sub,obj=create_dict()
print "graph loaded in memory"
verb_tag=['VB','VBD','VBG','VBN','VBP','VBZ']
nell_relations=read_nell_relations()
while(1):
#print "Enter the query"
query=raw_input()
stemmer=SnowballStemmer('english')
w_set=word_tokenize(query)
p_tag=pos_tag(w_set)
verb_list=[]
#print "user query configured"
for i in p_tag:
if(i[1] in verb_tag):
verb_list.append(i[0])
sub_obj=[]
#sub_obj=read_from_file('svo_file_modi.csv',verb_list,w_set)
"""if len(sub_obj)==0:
print "Not able to find subject and object in the query"
sys.exit(0)
"""
relation=[]
for so in sub_obj:
#check all combinations of so[0] with so[2]
for i in so[0]:
for j in so[2]:
try:
tmprel=sub[i].intersection(obj[j])
relation.append(tmprel)
except KeyError:
pass
"""fil_word=remove_stopwords(w_set)
tmpfil_word=[]
for x in fil_word:
tmpfil_word.extend(x.split('_'))
#uncomment below para
#print tmpfil_word
for i in xrange(len(tmpfil_word)):
for j in xrange(i+1,len(tmpfil_word)):
#print fil_word[i],fil_word[j]
try:
tmprel=sub[tmpfil_word[i]].intersection(obj[tmpfil_word[j]])
relation.append(tmprel)
except KeyError:
pass
if(len(relation)==0):
print "No information found with this relation"
else:
for i in relation:
print i
#for i in nell_relations:
# print i
"""
filter_relations=set_matching(nell_relations,query,None)
for i in filter_relations:
#chk nodes
rel_fname=get_relation_fname(i)
#if i!='company also known as':
# continue
#print rel_fname
ans=get_node(rel_fname,query)
print "Relation is: " +str(i)
ans = sorted(ans, key=operator.itemgetter(1))  # sorted() returns a new list; keep the result
flag=0
for j in ans:
if int(j[1])>1:
flag=1
print j
if(flag==0):
print "No information found with this relation"
| vedsarkushwaha/KBH_NELL | ques_ans.py | Python | apache-2.0 | 3,087 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'chénglíng'
CN=u'承灵'
NAME=u'chengling22'
CHANNEL='gallbladder'
CHANNEL_FULLNAME='GallbladderChannelofFoot-Shaoyang'
SEQ='GB18'
if __name__ == '__main__':
pass
| sinotradition/meridian | meridian/acupoints/chengling22.py | Python | apache-2.0 | 248 |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from uuid import uuid4
import mc_unittest
from rogerthat.api.services import getStaticFlow
from rogerthat.bizz.friend_helper import FriendHelper
from rogerthat.bizz.friends import makeFriends, ORIGIN_USER_INVITE
from rogerthat.bizz.profile import create_service_profile, create_user_profile
from rogerthat.bizz.service import create_menu_item, move_menu_item
from rogerthat.bizz.service.broadcast import generate_broadcast_settings_static_flow, \
generate_broadcast_settings_flow_def, _check_flow_end_modules
from rogerthat.dal.profile import get_user_profile
from rogerthat.dal.service import get_default_service_identity, get_friend_serviceidentity_connection
from rogerthat.models import CustomMessageFlowDesign
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.to.friends import ServiceMenuTO, FRIEND_TYPE_SERVICE
from rogerthat.to.service import GetStaticFlowRequestTO
from rogerthat.to.service_panel import WebServiceMenuTO
from rogerthat.utils.service import add_slash_default
from rogerthat_tests import set_current_user
class Test(mc_unittest.TestCase):
def _prepare_users(self):
service_user = users.User('svc-%s@foo.com' % uuid4())
service_profile = create_service_profile(service_user, u"s1")[0]
human_user = users.User('user-%s@foo.com' % uuid4())
user_profile = get_user_profile(human_user) or create_user_profile(human_user, u"i")
set_current_user(user_profile.user)
return user_profile, service_profile
def test_generated_broadcast_settings_flow(self):
user_profile, service_profile = self._prepare_users()
friend_user = user_profile.user
service_identity_user = add_slash_default(service_profile.user)
helper = FriendHelper.from_data_store(service_identity_user, FRIEND_TYPE_SERVICE)
helper.get_service_profile()
helper._service_profile = service_profile # because we modify the service_profile which is cached by the helper
makeFriends(friend_user, service_identity_user, service_identity_user, None, origin=ORIGIN_USER_INVITE)
# No broadcast types ==> no flow
self.assertFalse(generate_broadcast_settings_static_flow(helper, user_profile.user))
service_profile.broadcastTypes = ['Apes', 'Birds', 'Cats', 'Dogs']
service_profile.put()
friend_service_identity_connection = get_friend_serviceidentity_connection(friend_user, service_identity_user)
friend_service_identity_connection.enabled_broadcast_types = service_profile.broadcastTypes
friend_service_identity_connection.put()
self.assertTrue(generate_broadcast_settings_static_flow(helper, user_profile.user))
mfds = generate_broadcast_settings_flow_def(helper, user_profile)
self.assertEqual(1, len(mfds.definition))
mf_def = mfds.definition[0]
self.assertEqual(1, len(mf_def.formMessage))
fm = mf_def.formMessage[0]
self.assertEqual(len(service_profile.broadcastTypes), len(fm.form.widget.choice))
self.assertEqual(0, len(service_profile.broadcastTypes) - len(fm.form.widget.value)) # 0 disabled values
friend_service_identity_connection1 = get_friend_serviceidentity_connection(friend_user, service_identity_user)
friend_service_identity_connection1.enabled_broadcast_types = ['Birds', 'Cats', 'Dogs']
friend_service_identity_connection1.disabled_broadcast_types = ['Apes']
friend_service_identity_connection1.put()
mfds = generate_broadcast_settings_flow_def(helper, user_profile)
self.assertEqual(1, len(mfds.definition))
mf_def = mfds.definition[0]
self.assertEqual(1, len(mf_def.formMessage))
fm = mf_def.formMessage[0]
self.assertEqual(len(service_profile.broadcastTypes), len(fm.form.widget.choice))
self.assertEqual(1, len(service_profile.broadcastTypes) - len(fm.form.widget.value)) # 1 disabled values
friend_service_identity_connection2 = get_friend_serviceidentity_connection(friend_user, service_identity_user)
friend_service_identity_connection2.enabled_broadcast_types = ['Cats', 'Dogs']
friend_service_identity_connection2.disabled_broadcast_types = ['Apes', 'Birds']
friend_service_identity_connection2.put()
mfds = generate_broadcast_settings_flow_def(helper, user_profile)
self.assertEqual(1, len(mfds.definition))
mf_def = mfds.definition[0]
self.assertEqual(1, len(mf_def.formMessage))
fm = mf_def.formMessage[0]
self.assertEqual(len(service_profile.broadcastTypes), len(fm.form.widget.choice))
self.assertEqual(2, len(service_profile.broadcastTypes) - len(fm.form.widget.value)) # 2 disabled values
def test_menu_items(self):
user_profile, service_profile = self._prepare_users()
friend_user = user_profile.user
service_user = service_profile.user
service_identity = get_default_service_identity(service_profile.user)
makeFriends(friend_user, service_identity.user, service_identity.user, None, origin=ORIGIN_USER_INVITE)
helper = FriendHelper.from_data_store(service_identity.user, FRIEND_TYPE_SERVICE)
helper.get_service_profile()
helper._service_profile = service_profile # because we modify the service_profile which is cached by the helper
menu = ServiceMenuTO.from_model(helper, user_profile.language, user_profile)
self.assertEqual(0, len(menu.items))
# No broadcast types --> error
self.assertRaises(BusinessException, create_menu_item, service_user, "3d", "000000", "label", "tag", [1, 1, 1],
None, None, False, False, [], is_broadcast_settings=True, broadcast_branding=None)
service_profile.broadcastTypes = ['Apes', 'Birds', 'Cats', 'Dogs']
service_profile.put()
friend_service_identity_connection = get_friend_serviceidentity_connection(friend_user, service_identity.user)
friend_service_identity_connection.enabled_broadcast_types = service_profile.broadcastTypes
friend_service_identity_connection.put()
create_menu_item(service_user, "3d", "000000", "label", "tag", [1, 1, 1], None, None, False, False, [],
is_broadcast_settings=True, broadcast_branding=None)
def _test_1_bc_settings_item():
menu = ServiceMenuTO.from_model(helper, user_profile.language, user_profile)
self.assertEqual(1, len(menu.items))
smi = menu.items[0]
self.assertTrue(smi.staticFlowHash)
request = GetStaticFlowRequestTO()
request.service = service_user.email()
request.staticFlowHash = smi.staticFlowHash
request.coords = smi.coords
response = getStaticFlow(request)
self.assertTrue(response)
self.assertTrue(response.staticFlow)
_test_1_bc_settings_item()
move_menu_item(service_user, [1, 1, 1], [2, 2, 2])
_test_1_bc_settings_item()
# test Web-version of menu TO
WebServiceMenuTO.from_model(helper, user_profile.language, user_profile)
def test_patched_test_broadcast_flow(self):
_, service_profile = self._prepare_users()
mfd = CustomMessageFlowDesign()
mfd.user = service_profile.user
mfd.key = lambda: None
mfd.xml = """<?xml version="1.0" encoding="utf-8"?>
<messageFlowDefinitionSet xmlns="https://rogerth.at/api/1/MessageFlow.xsd">
<definition name="test2" language="en" startReference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogIm1lc3NhZ2VfMTIzIn0=">
<end id="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8xIn0=" waitForFollowUpMessage="false"/>
<end id="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8yIn0=" waitForFollowUpMessage="false"/>
<end id="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8zIn0=" waitForFollowUpMessage="false"/>
<end id="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF81In0=" waitForFollowUpMessage="false"/>
<message id="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogIm1lc3NhZ2VfMTIzIn0=" alertIntervalType="NONE" alertType="SILENT" allowDismiss="true" vibrate="false" dismissReference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF81In0=" autoLock="true">
<content>123</content>
<answer action="" caption="1" id="button_1" reference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8zIn0="/>
<answer action="" caption="2" id="button_2" reference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogIm1lc3NhZ2VfMSJ9"/>
<answer action="" caption="4" id="button_4" reference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8xIn0="/>
</message>
<formMessage id="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogIm1lc3NhZ2VfMSJ9" alertIntervalType="NONE" alertType="SILENT" positiveReference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8yIn0=" vibrate="false" autoLock="true" negativeReference="base64:eyJsYW5nIjogImVuIiwgIm1mZCI6ICJhaE5rWlhaLWJXOWlhV05oWjJWamJHOTFaR2h5Y2pZTEVncHRZeTEwY21GamEyVnlJZ3B6TVVCbWIyOHVZMjl0REFzU0VVMWxjM05oWjJWR2JHOTNSR1Z6YVdkdUlnVjBaWE4wTWd3IiwgImlkIjogImVuZF8zIn0=">
<content>Which type of broadcasts do you wish to receive:</content>
<form positiveButtonConfirmation="" negativeButtonCaption="Cancel" positiveButtonCaption="Save" negativeButtonConfirmation="">
<widget xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="SelectMultiWidget">
<choice value="1" label="1"/>
<choice value="2" label="2"/>
<value value="1"/>
<value value="2"/>
</widget>
</form>
</formMessage>
</definition>
</messageFlowDefinitionSet>
"""
new_xml = _check_flow_end_modules(mfd)
assert new_xml
def test_patched_test_broadcast_flow2(self):
_, service_profile = self._prepare_users()
mfd = CustomMessageFlowDesign()
mfd.user = service_profile.user
mfd.key = lambda: None
mfd.xml = """<?xml version="1.0" encoding="utf-8"?>
<messageFlowDefinitionSet xmlns="https://rogerth.at/api/1/MessageFlow.xsd">
<definition
name="Feedback"
language="en"
startReference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMSIsICJsYW5nIjogIm5sIn0=">
<end
id="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAiZW5kX0VORCIsICJsYW5nIjogIm5sIn0="
waitForFollowUpMessage="false" />
<message
id="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMSIsICJsYW5nIjogIm5sIn0="
alertIntervalType="NONE"
alertType="SILENT"
brandingKey=""
allowDismiss="false"
vibrate="false"
autoLock="true">
<content>feedback-message-1</content>
<answer
action=""
caption="Yes"
id="button_Q1_YES"
reference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMiIsICJsYW5nIjogIm5sIn0=" />
<answer
action=""
caption="No"
id="button_Q1_NO"
reference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMiIsICJsYW5nIjogIm5sIn0=" />
</message>
<message
id="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMiIsICJsYW5nIjogIm5sIn0="
alertIntervalType="NONE"
alertType="SILENT"
brandingKey=""
allowDismiss="false"
vibrate="false"
autoLock="true">
<content>feedback-message-2</content>
<answer
action=""
caption="Yes"
id="button_Q2_YES"
reference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAiZmx1c2hfUjEiLCAibGFuZyI6ICJubCJ9" />
<answer
action=""
caption="No"
id="button_Q2_NO"
reference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAiZmx1c2hfUjEiLCAibGFuZyI6ICJubCJ9" />
</message>
<message
id="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMyIsICJsYW5nIjogIm5sIn0="
alertIntervalType="NONE"
alertType="SILENT"
brandingKey=""
allowDismiss="true"
vibrate="false"
dismissReference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAiZW5kX0VORCIsICJsYW5nIjogIm5sIn0="
autoLock="true">
<content>feedback-message-3</content>
</message>
<resultsFlush
id="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAiZmx1c2hfUjEiLCAibGFuZyI6ICJubCJ9"
reference="base64:eyJtZmQiOiAiYWhOa1pYWi1iVzlpYVdOaFoyVmpiRzkxWkdoeWNrSUxFZ3B0WXkxMGNtRmphMlZ5SWhOeVpYTjBielpBYlc5aWFXTmhaMlV1WTI5dERBc1NFVTFsYzNOaFoyVkdiRzkzUkdWemFXZHVJZ2htWldWa1ltRmphd3ciLCAiaWQiOiAibWVzc2FnZV9RMyIsICJsYW5nIjogIm5sIn0=" />
</definition>
</messageFlowDefinitionSet>
"""
new_xml = _check_flow_end_modules(mfd)
assert new_xml
def test_patched_test_broadcast_flow3(self):
_, service_profile = self._prepare_users()
mfd = CustomMessageFlowDesign()
mfd.user = service_profile.user
mfd.key = lambda: None
mfd.xml = """<?xml version="1.0" encoding="utf-8"?>
<messageFlowDefinitionSet xmlns="https://rogerth.at/api/1/MessageFlow.xsd">
<definition name="meldingskaart" language="nl" startReference="message_1">
<end id="end_1" waitForFollowUpMessage="false"/>
<message id="message_1" alertIntervalType="NONE" alertType="SILENT" brandingKey="B26DFDEF91BB94325DF1537AE6A9048F10B1C9FB84836CAE085B4355EA3408A5" allowDismiss="false" vibrate="false" autoLock="true">
<content>maak hier uw keuze</content>
<answer action="" caption="slecht wegdek" id="button_slecht wegdek" reference="message_2"/>
<answer action="tel://32475982340" caption="bel lieven" id="button_bel lieven" reference="end_1"/>
<answer action="" caption="zwerfvuil" id="button_zwerfvuil" reference="message_2"/>
</message>
<formMessage id="message_2" alertIntervalType="NONE" alertType="SILENT" brandingKey="B26DFDEF91BB94325DF1537AE6A9048F10B1C9FB84836CAE085B4355EA3408A5" positiveReference="message_3" vibrate="false" autoLock="true" negativeReference="message_1">
<content>???</content>
<form positiveButtonConfirmation="" negativeButtonCaption="annuleren" positiveButtonCaption="verder" negativeButtonConfirmation="">
<widget xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="TextBlockWidget" maxChars="50"/>
</form>
</formMessage>
<formMessage id="message_3" alertIntervalType="NONE" alertType="SILENT" brandingKey="B26DFDEF91BB94325DF1537AE6A9048F10B1C9FB84836CAE085B4355EA3408A5" positiveReference="message_4=" vibrate="false" autoLock="true" negativeReference="end_1">
<content>cdsc</content>
<form positiveButtonConfirmation="" negativeButtonCaption="Cancel" positiveButtonCaption="Submit" negativeButtonConfirmation="">
<widget xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="PhotoUploadWidget" camera="true" quality="400000" gallery="false"/>
</form>
</formMessage>
<formMessage id="message_4=" alertIntervalType="NONE" alertType="SILENT" brandingKey="B26DFDEF91BB94325DF1537AE6A9048F10B1C9FB84836CAE085B4355EA3408A5" positiveReference="email_1" vibrate="false" autoLock="true" negativeReference="message_3">
<content>????</content>
<form positiveButtonConfirmation="" negativeButtonCaption="Cancel" positiveButtonCaption="Submit" negativeButtonConfirmation="">
<widget xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="SliderWidget" max="10.000000" step="1.000000" precision="1" unit="<value/>" min="1.000000" value="1.000000"/>
</form>
</formMessage>
<resultsEmail id="email_1" reference="end_1" emailAdmins="false">
<email value="communicatie@lochristi.be"/>
</resultsEmail>
</definition>
</messageFlowDefinitionSet>
"""
new_xml = _check_flow_end_modules(mfd)
assert new_xml
| rogerthat-platform/rogerthat-backend | src-test/rogerthat_tests/mobicage/bizz/service/test_broadcast.py | Python | apache-2.0 | 20,430 |
from domino import Domino
import os
domino = Domino("marks/quick-start-fork",
api_key=os.environ['DOMINO_USER_API_KEY'],
host=os.environ['DOMINO_API_HOST'])
if domino.endpoint_publish("main.py", "api_endpoint",
"22d864481c66b36d676056905d1f0545f5d3b742"):
print("API endpoint published!")
else:
print("API endpoint could not be published")
| dominodatalab/python-domino | examples/publish_api_endpoint.py | Python | apache-2.0 | 409 |
class FeedDisplayMapper:
def __init__(self):
self._display_to_internal_map = {}
self._internal_to_display_map = {}
def register(self, internal_name: str, display_name: str) -> None:
self._display_to_internal_map[display_name] = internal_name
self._internal_to_display_map[internal_name] = display_name
def get_display_name(self, internal_name: str) -> str:
return self._internal_to_display_map[internal_name]
def get_internal_name(self, display_name: str) -> str:
return self._display_to_internal_map[display_name]
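# Illustrative usage sketch (hypothetical feed names, not part of the module):
#     mapper = FeedDisplayMapper()
#     mapper.register("nvdv2", "NVD")
#     mapper.get_display_name("nvdv2")   # -> "NVD"
#     mapper.get_internal_name("NVD")    # -> "nvdv2"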
| anchore/anchore-engine | anchore_engine/services/policy_engine/engine/vulns/feed_display_mapper.py | Python | apache-2.0 | 582 |
#
# Copyright 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import fnmatch
import hashlib
import itertools
import json
import operator
import pkg_resources
import threading
import uuid
from gnocchiclient import exceptions as gnocchi_exc
from keystoneauth1 import exceptions as ka_exceptions
from oslo_log import log
from oslo_utils import timeutils
from stevedore import extension
from urllib import parse as urlparse
from ceilometer import declarative
from ceilometer import gnocchi_client
from ceilometer.i18n import _
from ceilometer import keystone_client
from ceilometer import publisher
NAME_ENCODED = __name__.encode('utf-8')
CACHE_NAMESPACE = uuid.UUID(bytes=hashlib.md5(NAME_ENCODED).digest())
LOG = log.getLogger(__name__)
def cache_key_mangler(key):
"""Construct an opaque cache key."""
return uuid.uuid5(CACHE_NAMESPACE, key).hex
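# For example, cache_key_mangler('some-project-id') always yields the same
# 32-character hex digest (uuid5 of CACHE_NAMESPACE and the key), so raw keys
# never appear verbatim in the cache backend.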
EVENT_CREATE, EVENT_UPDATE, EVENT_DELETE = ("create", "update", "delete")
class ResourcesDefinition(object):
MANDATORY_FIELDS = {'resource_type': str,
'metrics': (dict, list)}
MANDATORY_EVENT_FIELDS = {'id': str}
def __init__(self, definition_cfg, archive_policy_default,
archive_policy_override, plugin_manager):
self.cfg = definition_cfg
self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg)
if self.support_events():
self._check_required_and_types(self.MANDATORY_EVENT_FIELDS,
self.cfg['event_attributes'])
self._attributes = {}
for name, attr_cfg in self.cfg.get('attributes', {}).items():
self._attributes[name] = declarative.Definition(name, attr_cfg,
plugin_manager)
self._event_attributes = {}
for name, attr_cfg in self.cfg.get('event_attributes', {}).items():
self._event_attributes[name] = declarative.Definition(
name, attr_cfg, plugin_manager)
self.metrics = {}
# NOTE(sileht): Convert old list to new dict format
if isinstance(self.cfg['metrics'], list):
values = [None] * len(self.cfg['metrics'])
self.cfg['metrics'] = dict(zip(self.cfg['metrics'], values))
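# e.g. a legacy list such as ['cpu', 'memory.usage'] becomes
# {'cpu': None, 'memory.usage': None}, so both formats share the loop below.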
for m, extra in self.cfg['metrics'].items():
if not extra:
extra = {}
if not extra.get("archive_policy_name"):
extra["archive_policy_name"] = archive_policy_default
if archive_policy_override:
extra["archive_policy_name"] = archive_policy_override
# NOTE(sileht): For backward compat, this is after the override to
# preserve the weird previous behavior. We don't really care as we
# deprecate it.
if 'archive_policy' in self.cfg:
LOG.warning("archive_policy '%s' for a resource-type (%s) is "
"deprecated, set it for each metric instead.",
self.cfg["archive_policy"],
self.cfg["resource_type"])
extra["archive_policy_name"] = self.cfg['archive_policy']
self.metrics[m] = extra
@staticmethod
def _check_required_and_types(expected, definition):
for field, field_types in expected.items():
if field not in definition:
raise declarative.ResourceDefinitionException(
_("Required field %s not specified") % field, definition)
if not isinstance(definition[field], field_types):
raise declarative.ResourceDefinitionException(
_("Required field %(field)s should be a %(type)s") %
{'field': field, 'type': field_types}, definition)
@staticmethod
def _ensure_list(value):
if isinstance(value, list):
return value
return [value]
def support_events(self):
for e in ["event_create", "event_delete", "event_update"]:
if e in self.cfg:
return True
return False
def event_match(self, event_type):
for e in self._ensure_list(self.cfg.get('event_create', [])):
if fnmatch.fnmatch(event_type, e):
return EVENT_CREATE
for e in self._ensure_list(self.cfg.get('event_delete', [])):
if fnmatch.fnmatch(event_type, e):
return EVENT_DELETE
for e in self._ensure_list(self.cfg.get('event_update', [])):
if fnmatch.fnmatch(event_type, e):
return EVENT_UPDATE
def sample_attributes(self, sample):
attrs = {}
sample_dict = sample.as_dict()
for name, definition in self._attributes.items():
value = definition.parse(sample_dict)
if value is not None:
attrs[name] = value
return attrs
def event_attributes(self, event):
attrs = {'type': self.cfg['resource_type']}
traits = dict([(trait.name, trait.value) for trait in event.traits])
for attr, field in self.cfg.get('event_attributes', {}).items():
value = traits.get(field)
if value is not None:
attrs[attr] = value
return attrs
class LockedDefaultDict(defaultdict):
"""defaultdict with lock to handle threading
A key is only deleted when nothing else is accessing the dict and nothing
is holding that key's lock; otherwise the delete is skipped.
"""
def __init__(self, *args, **kwargs):
self.lock = threading.Lock()
super(LockedDefaultDict, self).__init__(*args, **kwargs)
def __getitem__(self, key):
with self.lock:
return super(LockedDefaultDict, self).__getitem__(key)
def pop(self, key, *args):
with self.lock:
key_lock = super(LockedDefaultDict, self).__getitem__(key)
if key_lock.acquire(False):
try:
super(LockedDefaultDict, self).pop(key, *args)
finally:
key_lock.release()
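# A minimal usage sketch for LockedDefaultDict, assuming the factory is
# threading.Lock as used by GnocchiPublisher below; kept as comments so the
# module behaviour is unchanged:
#
#     resource_locks = LockedDefaultDict(threading.Lock)
#     with resource_locks['resource-1']:
#         pass  # work on the resource while holding its per-key lock
#     resource_locks.pop('resource-1', None)  # removed only if nobody holds it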
class GnocchiPublisher(publisher.ConfigPublisherBase):
"""Publisher class for recording metering data into the Gnocchi service.
    The publisher class records each meter into the Gnocchi service
    configured in the Ceilometer pipeline file. An example target may
    look like the following:
gnocchi://?archive_policy=low&filter_project=gnocchi
"""
def __init__(self, conf, parsed_url):
super(GnocchiPublisher, self).__init__(conf, parsed_url)
# TODO(jd) allow to override Gnocchi endpoint via the host in the URL
options = urlparse.parse_qs(parsed_url.query)
self.filter_project = options.get('filter_project', ['service'])[-1]
self.filter_domain = options.get('filter_domain', ['Default'])[-1]
resources_definition_file = options.get(
'resources_definition_file', ['gnocchi_resources.yaml'])[-1]
archive_policy_override = options.get('archive_policy', [None])[-1]
self.resources_definition, self.archive_policies_definition = (
self._load_definitions(conf, archive_policy_override,
resources_definition_file))
self.metric_map = dict((metric, rd) for rd in self.resources_definition
for metric in rd.metrics)
timeout = options.get('timeout', [6.05])[-1]
self._ks_client = keystone_client.get_client(conf)
self.cache = None
try:
import oslo_cache
oslo_cache.configure(conf)
# NOTE(cdent): The default cache backend is a real but
# noop backend. We don't want to use that here because
# we want to avoid the cache pathways entirely if the
# cache has not been configured explicitly.
if conf.cache.enabled:
cache_region = oslo_cache.create_region()
self.cache = oslo_cache.configure_cache_region(
conf, cache_region)
self.cache.key_mangler = cache_key_mangler
except ImportError:
pass
except oslo_cache.exception.ConfigurationError as exc:
LOG.warning('unable to configure oslo_cache: %s', exc)
self._gnocchi_project_id = None
self._gnocchi_project_id_lock = threading.Lock()
self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)
self._gnocchi = gnocchi_client.get_gnocchiclient(
conf, request_timeout=timeout)
self._already_logged_event_types = set()
self._already_logged_metric_names = set()
self._already_configured_archive_policies = False
@staticmethod
def _load_definitions(conf, archive_policy_override,
resources_definition_file):
plugin_manager = extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin')
data = declarative.load_definitions(
conf, {}, resources_definition_file,
pkg_resources.resource_filename(__name__,
"data/gnocchi_resources.yaml"))
archive_policy_default = data.get("archive_policy_default",
"ceilometer-low")
resource_defs = []
for resource in data.get('resources', []):
try:
resource_defs.append(ResourcesDefinition(
resource,
archive_policy_default,
archive_policy_override,
plugin_manager))
except Exception:
LOG.error("Failed to load resource due to error",
exc_info=True)
return resource_defs, data.get("archive_policies", [])
def ensures_archives_policies(self):
if not self._already_configured_archive_policies:
for ap in self.archive_policies_definition:
try:
self._gnocchi.archive_policy.create(ap)
except gnocchi_exc.ArchivePolicyAlreadyExists:
# created in the meantime by another worker
pass
self._already_configured_archive_policies = True
@property
def gnocchi_project_id(self):
if self._gnocchi_project_id is not None:
return self._gnocchi_project_id
with self._gnocchi_project_id_lock:
if self._gnocchi_project_id is None:
try:
project = self._ks_client.projects.find(
name=self.filter_project,
domain=self.filter_domain)
except ka_exceptions.NotFound:
LOG.warning('filtered project not found in keystone,'
' ignoring the filter_project '
'option')
self.filter_project = None
return None
except Exception:
                    LOG.exception('failed to retrieve filtered project')
raise
self._gnocchi_project_id = project.id
LOG.debug("filtered project found: %s",
self._gnocchi_project_id)
return self._gnocchi_project_id
def _is_swift_account_sample(self, sample):
try:
return (self.metric_map[sample.name].cfg['resource_type']
== 'swift_account')
except KeyError:
return False
def _is_gnocchi_activity(self, sample):
return (self.filter_project and self.gnocchi_project_id and (
# avoid anything from the user used by gnocchi
sample.project_id == self.gnocchi_project_id or
# avoid anything in the swift account used by gnocchi
(sample.resource_id == self.gnocchi_project_id and
self._is_swift_account_sample(sample))
))
def _get_resource_definition_from_event(self, event_type):
for rd in self.resources_definition:
operation = rd.event_match(event_type)
if operation:
return rd, operation
def publish_samples(self, data):
self.ensures_archives_policies()
# NOTE(sileht): skip sample generated by gnocchi itself
data = [s for s in data if not self._is_gnocchi_activity(s)]
data.sort(key=operator.attrgetter('resource_id'))
resource_grouped_samples = itertools.groupby(
data, key=operator.attrgetter('resource_id'))
gnocchi_data = {}
measures = {}
for resource_id, samples_of_resource in resource_grouped_samples:
for sample in samples_of_resource:
metric_name = sample.name
LOG.debug("Processing sample [%s] for resource ID [%s].",
sample, resource_id)
rd = self.metric_map.get(metric_name)
if rd is None:
if metric_name not in self._already_logged_metric_names:
LOG.warning("metric %s is not handled by Gnocchi" %
metric_name)
self._already_logged_metric_names.add(metric_name)
continue
# NOTE(sileht): / is forbidden by Gnocchi
resource_id = resource_id.replace('/', '_')
if resource_id not in gnocchi_data:
gnocchi_data[resource_id] = {
'resource_type': rd.cfg['resource_type'],
'resource': {"id": resource_id,
"user_id": sample.user_id,
"project_id": sample.project_id}}
gnocchi_data[resource_id].setdefault(
"resource_extra", {}).update(rd.sample_attributes(sample))
measures.setdefault(resource_id, {}).setdefault(
metric_name,
{"measures": [],
"archive_policy_name":
rd.metrics[metric_name]["archive_policy_name"],
"unit": sample.unit}
)["measures"].append(
{'timestamp': sample.timestamp,
'value': sample.volume}
)
try:
self.batch_measures(measures, gnocchi_data)
except gnocchi_exc.ClientException as e:
LOG.error(str(e))
except Exception as e:
LOG.error(str(e), exc_info=True)
for info in gnocchi_data.values():
resource = info["resource"]
resource_type = info["resource_type"]
resource_extra = info["resource_extra"]
if not resource_extra:
continue
try:
self._if_not_cached(resource_type, resource['id'],
resource_extra)
except gnocchi_exc.ClientException as e:
LOG.error(str(e))
except Exception as e:
LOG.error(str(e), exc_info=True)
@staticmethod
def _extract_resources_from_error(e, resource_infos):
resource_ids = set([r['original_resource_id']
for r in e.message['detail']])
return [(resource_infos[rid]['resource_type'],
resource_infos[rid]['resource'],
resource_infos[rid]['resource_extra'])
for rid in resource_ids]
def batch_measures(self, measures, resource_infos):
# NOTE(sileht): We don't care about error here, we want
# resources metadata always been updated
try:
self._gnocchi.metric.batch_resources_metrics_measures(
measures, create_metrics=True)
except gnocchi_exc.BadRequest as e:
if not isinstance(e.message, dict):
raise
if e.message.get('cause') != 'Unknown resources':
raise
resources = self._extract_resources_from_error(e, resource_infos)
for resource_type, resource, resource_extra in resources:
try:
resource.update(resource_extra)
self._create_resource(resource_type, resource)
except gnocchi_exc.ResourceAlreadyExists:
# NOTE(sileht): resource created in the meantime
pass
except gnocchi_exc.ClientException as e:
LOG.error('Error creating resource %(id)s: %(err)s',
{'id': resource['id'], 'err': str(e)})
# We cannot post measures for this resource
# and we can't patch it later
del measures[resource['id']]
del resource_infos[resource['id']]
else:
if self.cache and resource_extra:
self.cache.set(resource['id'],
self._hash_resource(resource_extra))
# NOTE(sileht): we have created missing resources/metrics,
# now retry to post measures
self._gnocchi.metric.batch_resources_metrics_measures(
measures, create_metrics=True)
LOG.debug(
"%d measures posted against %d metrics through %d resources",
sum(len(m["measures"])
for rid in measures
for m in measures[rid].values()),
sum(len(m) for m in measures.values()), len(resource_infos))
def _create_resource(self, resource_type, resource):
self._gnocchi.resource.create(resource_type, resource)
LOG.debug('Resource %s created', resource["id"])
def _update_resource(self, resource_type, res_id, resource_extra):
self._gnocchi.resource.update(resource_type, res_id, resource_extra)
LOG.debug('Resource %s updated', res_id)
def _if_not_cached(self, resource_type, res_id, resource_extra):
if self.cache:
attribute_hash = self._hash_resource(resource_extra)
if self._resource_cache_diff(res_id, attribute_hash):
with self._gnocchi_resource_lock[res_id]:
# NOTE(luogangyi): there is a possibility that the
# resource was already built in cache by another
# ceilometer-notification-agent when we get the lock here.
if self._resource_cache_diff(res_id, attribute_hash):
self._update_resource(resource_type, res_id,
resource_extra)
self.cache.set(res_id, attribute_hash)
else:
LOG.debug('Resource cache hit for %s', res_id)
self._gnocchi_resource_lock.pop(res_id, None)
else:
LOG.debug('Resource cache hit for %s', res_id)
else:
self._update_resource(resource_type, res_id, resource_extra)
@staticmethod
def _hash_resource(resource):
return hash(tuple(i for i in resource.items() if i[0] != 'metrics'))
def _resource_cache_diff(self, key, attribute_hash):
cached_hash = self.cache.get(key)
return not cached_hash or cached_hash != attribute_hash
def publish_events(self, events):
for event in events:
rd = self._get_resource_definition_from_event(event.event_type)
if not rd:
if event.event_type not in self._already_logged_event_types:
LOG.debug("No gnocchi definition for event type: %s",
event.event_type)
self._already_logged_event_types.add(event.event_type)
continue
rd, operation = rd
if operation == EVENT_DELETE:
self._delete_event(rd, event)
if operation == EVENT_CREATE:
self._create_event(rd, event)
if operation == EVENT_UPDATE:
self._update_event(rd, event)
def _update_event(self, rd, event):
resource = rd.event_attributes(event)
associated_resources = rd.cfg.get('event_associated_resources', {})
if associated_resources:
to_update = itertools.chain([resource], *[
self._search_resource(resource_type, query % resource['id'])
for resource_type, query in associated_resources.items()
])
else:
to_update = [resource]
for resource in to_update:
self._set_update_attributes(resource)
def _delete_event(self, rd, event):
ended_at = timeutils.utcnow().isoformat()
resource = rd.event_attributes(event)
associated_resources = rd.cfg.get('event_associated_resources', {})
if associated_resources:
to_end = itertools.chain([resource], *[
self._search_resource(resource_type, query % resource['id'])
for resource_type, query in associated_resources.items()
])
else:
to_end = [resource]
for resource in to_end:
self._set_ended_at(resource, ended_at)
def _create_event(self, rd, event):
resource = rd.event_attributes(event)
resource_type = resource.pop('type')
try:
self._create_resource(resource_type, resource)
except gnocchi_exc.ResourceAlreadyExists:
LOG.debug("Create event received on existing resource (%s), "
"ignore it.", resource['id'])
except Exception:
LOG.error("Failed to create resource %s", resource,
exc_info=True)
def _search_resource(self, resource_type, query):
try:
return self._gnocchi.resource.search(
resource_type, json.loads(query))
except Exception:
LOG.error("Fail to search resource type %(resource_type)s "
"with '%(query)s'",
{'resource_type': resource_type, 'query': query},
exc_info=True)
return []
def _set_update_attributes(self, resource):
try:
resource_id = resource.pop('id')
resource_type = resource.pop('type')
self._if_not_cached(resource_type, resource_id, resource)
        except gnocchi_exc.ResourceNotFound:
            LOG.debug("Update event received on nonexistent resource (%s), "
                      "ignoring it.", resource_id)
        except Exception:
            LOG.error("Failed to update the resource %s", resource,
                      exc_info=True)
def _set_ended_at(self, resource, ended_at):
try:
self._gnocchi.resource.update(resource['type'], resource['id'],
{'ended_at': ended_at})
        except gnocchi_exc.ResourceNotFound:
            LOG.debug("Delete event received on nonexistent resource (%s), "
                      "ignoring it.", resource['id'])
        except Exception:
            LOG.error("Failed to update the resource %s", resource,
                      exc_info=True)
        LOG.debug('Resource %s ended at %s', resource["id"], ended_at)
|
openstack/ceilometer
|
ceilometer/publisher/gnocchi.py
|
Python
|
apache-2.0
| 23,918
|
# (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests NotificationProcessor"""
import time
import unittest
import mock
from monasca_notification import notification as m_notification
from monasca_notification.processors import notification_processor
class smtpStub(object):
def __init__(self, log_queue):
self.queue = log_queue
def sendmail(self, from_addr, to_addr, msg):
self.queue.put("%s %s %s" % (from_addr, to_addr, msg))
class requestsResponse(object):
def __init__(self, status):
self.status_code = status
class TestNotificationProcessor(unittest.TestCase):
def setUp(self):
self.trap = []
self.email_config = {'server': 'my.smtp.server',
'port': 25,
'user': None,
'password': None,
'timeout': 60,
'from_addr': 'hpcs.mon@hp.com'}
self.mysql_config = {'ssl': None,
'host': 'mysql_host',
'port': 'mysql_port',
'user': 'mysql_user',
'db': 'dbname',
'passwd': 'mysql_passwd'}
self.statsd_config = {'host': 'localhost',
'port': 8125}
def tearDown(self):
pass
# ------------------------------------------------------------------------
# Test helper functions
# ------------------------------------------------------------------------
@mock.patch('pymysql.connect')
@mock.patch('monasca_notification.common.utils.monascastatsd')
@mock.patch('monasca_notification.types.notifiers.email_notifier.smtplib')
@mock.patch('monasca_notification.processors.notification_processor.notifiers.log')
def _start_processor(self, notifications, mock_log, mock_smtp, mock_statsd, mock_pymsql):
"""Start the processor with the proper mocks
"""
        # Since the log runs in another thread I can't mock it directly; instead change the methods to put to a queue
mock_log.warn = self.trap.append
mock_log.error = self.trap.append
mock_smtp.SMTP = self._smtpStub
config = {}
config["email"] = self.email_config
config["mysql"] = self.mysql_config
config["statsd"] = self.statsd_config
config["notification_types"] = {}
processor = (notification_processor.NotificationProcessor(config))
processor.send(notifications)
def _smtpStub(self, *arg, **kwargs):
return smtpStub(self.trap)
def email_setup(self, metric):
alarm_dict = {"tenantId": "0",
"alarmId": "0",
"alarmName": "test Alarm",
"alarmDescription": "test alarm description",
"oldState": "OK",
"newState": "ALARM",
"severity": "LOW",
"link": "some-link",
"lifecycleState": "OPEN",
"stateChangeReason": "I am alarming!",
"timestamp": time.time(),
"metrics": metric}
notification = m_notification.Notification(0, 'email', 'email notification', 'me@here.com', 0, 0, alarm_dict)
self._start_processor([notification])
# ------------------------------------------------------------------------
# Unit tests
# ------------------------------------------------------------------------
def test_invalid_notification(self):
"""Verify invalid notification type is rejected.
"""
alarm_dict = {"tenantId": "0", "alarmId": "0", "alarmName": "test Alarm", "oldState": "OK", "newState": "ALARM",
"alarmDescription": "test alarm description",
"stateChangeReason": "I am alarming!", "timestamp": time.time(), "metrics": "cpu_util",
"severity": "LOW", "link": "http://some-place.com", "lifecycleState": "OPEN"}
invalid_notification = m_notification.Notification(0, 'invalid', 'test notification',
'me@here.com', 0, 0, alarm_dict)
self._start_processor([invalid_notification])
self.assertIn('attempting to send unconfigured notification: invalid', self.trap)
def test_email_notification_single_host(self):
"""Email with single host
"""
metrics = []
metric_data = {'dimensions': {'hostname': 'foo1', 'service': 'bar1'}}
metrics.append(metric_data)
self.email_setup(metrics)
for msg in self.trap:
if "From: hpcs.mon@hp.com" in msg:
self.assertRegexpMatches(msg, "From: hpcs.mon@hp.com")
self.assertRegexpMatches(msg, "To: me@here.com")
self.assertRegexpMatches(msg, "Content-Type: text/plain")
self.assertRegexpMatches(msg, "Alarm .test Alarm.")
self.assertRegexpMatches(msg, "On host .foo1.")
|
sapcc/monasca-notification
|
tests/test_notification_processor.py
|
Python
|
apache-2.0
| 5,662
|
#!/usr/bin/env python
from setuptools import setup, find_packages
tests_require = [
'flake8',
'mock==0.8',
'pytest',
'pytest-django-lite',
]
install_requires = [
'Django>=1.5,<1.6',
'psycopg2',
]
setup(
name='django-db-routes',
version='0.1.0',
author='David Cramer',
author_email='dcramer@gmail.com',
url='http://github.com/getsentry/django-db-routes',
description='Shard management for the Django ORM',
packages=find_packages('src'),
package_dir={'': 'src'},
zip_safe=False,
extras_require={
'tests': install_requires + tests_require,
},
install_requires=install_requires,
tests_require=tests_require,
include_package_data=True,
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
dcramer/django-db-routes
|
setup.py
|
Python
|
apache-2.0
| 968
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
import multiprocessing
import utool
from vtool.tests import grabdata
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_DELETE_ANNOTATION_CHIPS]')
def TEST_DELETE_ANNOTATION_CHIPS(ibs, back):
gpath_list = grabdata.get_test_gpaths(ndata=None)[0:4]
gid_list = ibs.add_images(gpath_list)
bbox_list = [(0, 0, 100, 100)] * len(gid_list)
name_list = ['a', 'b', 'a', 'd']
aid_list = ibs.add_annots(gid_list, bbox_list=bbox_list, name_list=name_list)
assert len(aid_list) != 0, "No annotations"
aid = aid_list[0]
gid = ibs.get_annot_gids(aid)
assert gid is not None, "gid for aid=%r is None" % (aid,)
gthumbpath = ibs.get_image_thumbpath(gid)
annotation_thumbpath = ibs.get_annot_chip_thumbpath(aid)
ibs.delete_annot_chips(aid)
aid_list = ibs.get_valid_aids()
assert aid in aid_list, "Error: Annotation deleted"
assert not utool.checkpath(gthumbpath), "Image Thumbnail not deleted"
assert not utool.checkpath(annotation_thumbpath), "Roi Thumbnail not deleted"
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb_empty', gui=False,
allow_newdir=True, delete_ibsdir=True)
ibs = main_locals['ibs'] # IBEIS Control
back = main_locals['back'] # IBEIS GUI backend
test_locals = utool.run_test(TEST_DELETE_ANNOTATION_CHIPS, ibs, back)
exec(utool.execstr_dict(test_locals, 'test_locals'))
exec(utool.ipython_execstr())
|
SU-ECE-17-7/ibeis
|
_broken/test_delete_annotation_chips.py
|
Python
|
apache-2.0
| 1,694
|
#!/usr/bin/env python
"""
Source material taken from the spipylib package at Wadsworth
* Spiderarray.py
http://www.wadsworth.org/spider_doc/spider/docs/python/spipylib/array.html
AND
* Spiderutils.py
http://www.wadsworth.org/spider_doc/spider/docs/python/spipylib/library.html
# Spider Python Library: Spiderarray.py
# Copyright (C) 2006 Health Research Inc.
#
# HEALTH RESEARCH INCORPORATED (HRI),
# ONE UNIVERSITY PLACE, RENSSELAER, NY 12144-3455
#
# Email: spider@wadsworth.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
Modified on March 14, 2008 by Neil Voss:
* Merged Spiderutils.py and Spiderarray.py
* Removed unnecessary DOC file stuff
* Changed Numeric to numpy
* Allowed read first image of a stack
"""
import sys, struct
import numpy
import os
# --------------------------------------------------------------------
iforms = [1,3,-11,-12,-21,-22]
# --------------------------------------------------------------------
SpiderHeaderDict = {
1 : 'nslice ', 2 : 'nrow ', 3 : 'irec ', 4 : 'nhistrec ',
5 : 'iform ', 6 : 'imami ', 7 : 'fmax ', 8 : 'fmin ',
9 : 'av ', 10 : 'sig ', 11 : 'ihist ', 12 : 'nsam ',
13 : 'labrec ', 14 : 'iangle ', 15 : 'phi ', 16 : 'theta ',
17 : 'gamma ', 18 : 'xoff ', 19 : 'yoff ', 20 : 'zoff ',
21 : 'scale ', 22 : 'labbyt ', 23 : 'lenbyt ', 24 : 'istack ',
25 : 'NOTUSED ', 26 : 'maxim ', 27 : 'imgnum ', 28 : 'lastindx ',
29 : 'unused ', 30 : 'unused ', 31 : 'Kangle ', 32 : 'phi1 ',
33 : 'theta1 ', 34 : 'psi1 ', 35 : 'phi2 ', 36 : 'theta2 ',
37 : 'psi2 '}
# --------------------------------------------------------------------
def getHeaderDict(hdr):
hdrdict = {}
#hdrdict['header'] = hdr
hdrlen = len(hdr)
hdrdict['bigendian'] = hdr[0]
for i in range(1, hdrlen):
if i in SpiderHeaderDict:
name = SpiderHeaderDict[i]
if name in ['NOTUSED', 'unused']:
continue
val = hdr[i]
hdrdict[name.strip()] = val
if hdrlen > 9:
hdrdict['avg'] = hdr[9] # alternate access format
if hdrlen > 31:
hdrdict['kangle'] = hdr[31]
#import pprint
#pprint.pprint(hdrdict)
return hdrdict
# --------------------------------------------------------------------
def read(filename):
" Convert a SPIDER file into a numpy array "
#print "reading SPIDER file "+filename
return spider2array(filename)
# --------------------------------------------------------------------
def spider2array(filename):
    " Convert a SPIDER file into a numpy array "
    if not os.path.isfile(filename):
        return None
hdr = getSpiderHeader(filename)
hdrdict = getHeaderDict(hdr) # a class that simplifies accessing header elements
hdrbytes = int(hdrdict['labbyt'])
iform = int(hdrdict['iform'])
#for val in hdrdict:
# print val, hdrdict[val]
if iform == 1:
isVolume = False
elif iform == 3:
print "opening volume"
isVolume = True # to do: support for Fourier iforms
else:
print "iform %d not supported" % iform
return None
if hdrdict['istack'] > 0:
isStack = True
else:
isStack = False
xsize = int(hdrdict['nsam'])
ysize = int(hdrdict['nrow'])
if isVolume:
zsize = int(hdrdict['nslice'])
datawords = xsize * ysize * zsize
elif isStack:
ysize += 2
datawords = xsize * ysize
else:
datawords = xsize * ysize
databytes = datawords * 4
# seek ahead to the data
#print "read"
fp = open(filename,'rb')
fp.seek(hdrbytes)
if int(hdrdict['bigendian']):
#print "using big endian"
fmt = '>%df' % datawords
else:
#print "using small endian"
fmt = '<%df' % datawords
arr = numpy.fromfile(fp, dtype=numpy.dtype(fmt))
"""
if hdrdict['bigendian']:
data = fp.read(databytes)
#print "unpack"
t = struct.unpack(fmt, data)
# the numpy function 'array' will automatically upcast
# to 64 bits if you don't use savespace
#print "convert"
arr = numpy.array(t, dtype=numpy.dtype(fmt))
arr = numpy.fromfile(fp, dtype=numpy.dtype(fmt))
"""
fp.close()
if isVolume:
arr.shape = zsize, ysize, xsize
elif isStack:
arr.shape = ysize, xsize
arr = arr[2:ysize,:]
else:
arr.shape = ysize, xsize
return arr
# --------------------------------------------------------------------
def write(arr, filename):
" Convert a numpy array into a SPIDER file "
#print "writing SPIDER file "+filename
return array2spider(arr, filename)
# --------------------------------------------------------------------
def array2spider(arr, filename):
" Convert a numpy array into a SPIDER file "
# create and write the SPIDER header
hdr = makeSpiderHeader(arr.shape)
if len(hdr) < 256:
raise IOError, "Error creating Spider header"
try:
fp = open(filename, 'wb')
fp.writelines(hdr)
except:
raise IOError, "Unable to open %s for writing" % filename
# write image data
farr = numpy.array(arr, dtype=numpy.dtype('>f4'))
farr.tofile(fp)
#fp.write(farr.tostring())
    fp.close()
# --------------------------------------------------------------------
def getSpiderHeader(filename, n=27):
" returns first n numbers, with Spider indices (starting at 1)"
" if n = 'all', returns entire header "
if not os.path.exists(filename):
print "file does not exist"
return 0
getall = 0
if not isInt(n):
n = 27
getall = 1
nwords = n * 4 # no. floating point words
if os.path.getsize(filename) < nwords:
print "file is the wrong size"
return 0
try:
fp = open(filename,'rb')
f = fp.read(nwords) # read 27 * 4 bytes
fp.close()
except:
print "failed to open file"
return 0
bigendian = 1
bigformat = '>%df' % n
t = struct.unpack(bigformat,f) # try big-endian first
hdr = isSpiderHeader(t)
if hdr == 0:
#print "reading small endian"
bigendian = 0
littleformat = '<%df' % n
t = struct.unpack(littleformat,f) # little-endian
hdr = isSpiderHeader(t)
if hdr == 0:
print "header is null"
return 0
else:
# check if user requested the entire header
if getall:
labbyt = int(hdr[22]) # total no. of bytes in header
hdr = getSpiderHeader(filename, n=labbyt)
hdr = list(hdr)
hdr[0] = bigendian
return hdr
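# A hedged usage sketch combining getSpiderHeader and getHeaderDict above
# (the filename is hypothetical):
#
#     hdr = getSpiderHeader('image.spi')
#     hdrdict = getHeaderDict(hdr)
#     nsam, nrow = int(hdrdict['nsam']), int(hdrdict['nrow'])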
# --------------------------------------------------------------------
def makeSpiderHeader(dims):
" dims must be (nsam, nrow), or (nsam, nrow, nslice) "
if len(dims) == 2:
nsam, nrow = dims[1], dims[0]
nslice = 1.0
iform = 1.0
isVolume = 0
elif len(dims) == 3:
nsam, nrow, nslice = dims[1], dims[0], dims[2]
iform = 3.0
isVolume = 1
else:
return []
    lenbyt = nsam * 4      # record length in bytes
    labrec = 1024 / lenbyt # there are labrec records in the file header
if 1024%lenbyt != 0: labrec += 1
labbyt = labrec * lenbyt
hdr = []
nvalues = labbyt / 4
for i in range(nvalues):
hdr.append(0.0)
if len(hdr) < 23:
return []
# NB these are Fortran indices
hdr[1] = float(nslice) # nslice (=1 for an image)
hdr[2] = float(nrow) # number of rows per slice
hdr[5] = iform # iform for 2D image
hdr[12] = float(nsam) # number of pixels per line
hdr[13] = float(labrec) # number of records in file header
    hdr[22] = float(labbyt) # total number of bytes in header
hdr[23] = float(lenbyt) # record length in bytes
# adjust for Fortran indexing
hdr = hdr[1:]
hdr.append(1.0)
# pack binary data into a string
hdrstr = []
#print "WRITING HEADER"
getHeaderDict(hdr)
for v in hdr:
hdrstr.append(struct.pack('>f', v))
return hdrstr
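# A worked example of the header arithmetic above (the values follow directly
# from the formulas in makeSpiderHeader, not from a real file): for a 160x160
# image nsam = 160, so lenbyt = 640 bytes per record, labrec = 2 records
# (1024/640 rounds up), labbyt = 1280 header bytes, and the header holds
# labbyt/4 = 320 float values.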
# --------------------------------------------------------------------
def isSpiderHeader(t):
"returns tuple of values from a valid SPIDER header, else 0"
h = (99,) + t # add 1 value so can use spider header index start=1
# header values 1,2,5,12,13,22,23 should be integers
for i in [1,2,5,12,13,22,23]:
if not isInt(h[i]): return 0
# check iform
iform = int(h[5])
if not iform in iforms: return 0
# check other header values
labrec = int(h[13]) # no. records in file header
labbyt = int(h[22]) # total no. of bytes in header
lenbyt = int(h[23]) # record length in bytes
#print "labrec = %d, labbyt = %d, lenbyt = %d" % (labrec,labbyt,lenbyt)
if labbyt != (labrec * lenbyt): return 0
# looks like a valid header
return h
# --------------------------------------------------------------------
def isInt(f):
"returns 1 if input is an integer"
try:
i = int(f)
if f-i == 0: return 1
else: return 0
except:
return 0
# --------------------------------------------------------------------
def randTest():
print "Running read/write test"
### create random array
array1 = numpy.random.random((160,160))
print "********", array1.mean(), array1.std(), array1.shape
### write to file
write(array1, "rand1.spi")
### read array back in
array2 = read("rand1.spi")
print "********", array2.mean(), array2.std(), array2.shape
### convert using eman
import subprocess
emancmd = "proc2d rand1.spi rand3.spi spiderswap-single"
proc = subprocess.Popen(emancmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
### read eman array
array3 = read("rand3.spi")
print "********", array3.mean(), array3.std(), array3.shape
### copy with spider
import spyder
spider = spyder.SpiderSession(logo=False)
spider.toSpider("CP", "rand1", "rand2")
spider.toSpider("CP", "rand3", "rand4")
spider.close()
### read arrays
array4 = read("rand2.spi")
print "********", array4.mean(), array4.std(), array4.shape
array5 = read("rand4.spi")
print "********", array5.mean(), array5.std(), array5.shape
### direct convert using eman
from pyami import mrc
mrc.write(array1, "rand1.mrc")
emancmd = "proc2d rand1.mrc rand5.spi spiderswap-single"
proc = subprocess.Popen(emancmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
### read eman array
array6 = read("rand5.spi")
print "********", array6.mean(), array6.std(), array6.shape
def test_read_equals_write():
'''write out an image and test that it is the same if we read it back in'''
r,c = 128,256
test_array1 = numpy.arange(r*c, dtype=numpy.float32)
test_array1.shape = r,c
print 'array to write:'
print test_array1
print 'writing...'
write(test_array1, 'test.spi')
print 'reading...'
test_array2 = read('test.spi')
print 'array read:'
print test_array2
## test that shapes are the same
assert test_array1.shape == test_array2.shape
## test that values are the same
assert numpy.alltrue(test_array1 == test_array2)
print 'test completed successfully'
# --------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv[1:]) < 2:
randTest()
print "Usage: spi2arr.py spiderfile outfile"
sys.exit(1)
filename = sys.argv[1]
outfile = sys.argv[2]
arr = read(filename)
b = arr * -1 # perform a simple array operation
write(b, outfile)
|
vossman/ctfeval
|
pyami/spider.py
|
Python
|
apache-2.0
| 11,056
|
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
import six
from zun.common.i18n import _
class ParsableErrorMiddleware(object):
"""Replace error body with something the client can parse."""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# Request for this state, modified by replace_start_response()
# and used when an error is being reported.
state = {}
def replacement_start_response(status, headers, exc_info=None):
"""Overrides the default response to make errors parsable."""
try:
status_code = int(status.split(' ')[0])
state['status_code'] = status_code
except (ValueError, TypeError): # pragma: nocover
raise Exception(_(
'ErrorDocumentMiddleware received an invalid '
'status %s') % status)
else:
if (state['status_code'] // 100) not in (2, 3):
# Remove some headers so we can replace them later
# when we have the full error message and can
# compute the length.
headers = [(h, v)
for (h, v) in headers
if h not in ('Content-Length', 'Content-Type')
]
# Save the headers in case we need to modify them.
state['headers'] = headers
return start_response(status, headers, exc_info)
app_iter = self.app(environ, replacement_start_response)
if (state['status_code'] // 100) not in (2, 3):
errs = []
for err_str in app_iter:
err = {}
try:
err = json.loads(err_str.decode('utf-8'))
except ValueError:
pass
if 'title' in err and 'description' in err:
title = err['title']
desc = err['description']
else:
title = ''
desc = ''
code = err['faultcode'].lower() if 'faultcode' in err else ''
errs.append({
'request_id': '',
'code': code,
'status': state['status_code'],
'title': title,
'detail': desc,
'links': []
})
body = [six.b(json.dumps({'errors': errs}))]
state['headers'].append(('Content-Type', 'application/json'))
state['headers'].append(('Content-Length', str(len(body[0]))))
else:
body = app_iter
return body
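# A minimal, hypothetical wiring sketch; ParsableErrorMiddleware only assumes a
# standard WSGI callable, as the __call__(environ, start_response) signature
# above shows:
#
#     app = ParsableErrorMiddleware(wsgi_app)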
|
kevin-zhaoshuai/zun
|
zun/api/middleware/parsable_error.py
|
Python
|
apache-2.0
| 3,499
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Service(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1Service - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ServiceSpec',
'status': 'V1ServiceStatus'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._spec = spec
self._status = status
@property
def api_version(self):
"""
Gets the api_version of this V1Service.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1Service.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1Service.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1Service.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1Service.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1Service.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1Service.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1Service.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1Service.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:return: The metadata of this V1Service.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1Service.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1Service.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1Service.
Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:return: The spec of this V1Service.
:rtype: V1ServiceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1Service.
Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:param spec: The spec of this V1Service.
:type: V1ServiceSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1Service.
Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:return: The status of this V1Service.
:rtype: V1ServiceStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1Service.
Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:param status: The status of this V1Service.
:type: V1ServiceStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
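# A short, hypothetical construction sketch based on the swagger_types declared
# in __init__ above (the field values are placeholders, not real cluster data):
#
#     svc = V1Service(api_version='v1', kind='Service')
#     svc.to_dict()  # {'api_version': 'v1', 'kind': 'Service',
#                    #  'metadata': None, 'spec': None, 'status': None}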
|
skuda/client-python
|
kubernetes/client/models/v1_service.py
|
Python
|
apache-2.0
| 6,978
|
"""Defines the class for filtering data"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import logging
from data.filter.exceptions import InvalidDataFilter
from storage.models import ScaleFile
from util.validation import ValidationWarning
logger = logging.getLogger(__name__)
FILE_TYPES = {'filename', 'media-type', 'data-type', 'meta-data'}
STRING_TYPES = {'string', 'filename', 'media-type', 'data-type'}
STRING_CONDITIONS = {'==', '!=', 'in', 'not in', 'contains'}
NUMBER_TYPES = {'integer', 'number'}
NUMBER_CONDITIONS = {'<', '<=', '>','>=', '==', '!=', 'between', 'in', 'not in'}
BOOL_TYPES = {'boolean'}
BOOL_CONDITIONS = {'==', '!='}
OBJECT_TYPES = {'meta-data', 'object'}
OBJECT_CONDITIONS = {'subset of', 'superset of'}
def _less_than(input, values):
"""Checks if the given input is < the first value in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input < values[0]
except IndexError:
return False
def _less_than_equal(input, values):
"""Checks if the given input is <= the first value in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input <= values[0]
except IndexError:
return False
def _greater_than(input, values):
"""Checks if the given input is > the first value in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input > values[0]
except IndexError:
return False
def _greater_than_equal(input, values):
"""Checks if the given input is >= the first value in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input >= values[0]
except IndexError:
return False
def _equal(input, values):
"""Checks if the given input is equal to the first value in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input == values[0]
except IndexError:
return False
def _not_equal(input, values):
"""Checks if the given input is not equal to the first value in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input != values[0]
except IndexError:
return False
def _between(input, values):
"""Checks if the given input is between the first two values in the list
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return input >= values[0] and input <= values[1]
except IndexError:
return False
def _in(input, values):
"""Checks if the given input is in the list of values
:param input: The input to check
:type input: int/float
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
if input in values:
return True
except TypeError:
return False
return False
def _not_in(input, values):
"""Checks if the given input is not in the list of values
:param input: The input to check
:type input: int/float/string
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
if input in values:
return False
except TypeError:
return True
return True
def _contains(input, values):
"""Checks if the given input contains a value from the given list
:param input: The input to check
:type input: string/list
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
for value in values:
if value in input:
return True
except TypeError:
return False # catch error if input is not an iterable
return False
def _subset(input, values):
"""Checks if the given input is a subset of the given value
:param input: The input to check
:type input: dict
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return all(item in values[0].items() for item in input.items())
except AttributeError:
return False # catch error if input or values are not a dictionary
except IndexError:
return False
return False
def _superset(input, values):
"""Checks if the given input is a superset of the given value
:param input: The input to check
:type input: dict
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
return all(item in input.items() for item in values[0].items())
except AttributeError:
return False # catch error if input or values are not a dictionary
except IndexError:
return False
return False
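# Worked examples for the two dict conditions above (plain illustrations of the
# item-wise checks, not taken from any real Scale data):
#
#     _subset({'a': 1}, [{'a': 1, 'b': 2}])    # True: every input item is in values[0]
#     _superset({'a': 1, 'b': 2}, [{'a': 1}])  # True: every values[0] item is in input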
ALL_CONDITIONS = {'<': _less_than, '<=': _less_than_equal, '>': _greater_than,'>=': _greater_than_equal,
'==': _equal, '!=': _not_equal, 'between': _between, 'in': _in, 'not in': _not_in,
'contains': _contains, 'subset of': _subset, 'superset of': _superset}
def _getNestedDictField(data_dict, map_list):
try:
for k in map_list: data_dict = data_dict[k]
return data_dict
except KeyError:
return None
class DataFilter(object):
"""Represents a filter that either accepts or denies a set of data values
"""
def __init__(self, filter_list=None, all=True):
"""Constructor
:param filters: Filters to determine whether to accept or deny data
:type filters: dict
:param all: Whether all filters need to pass to accept data
:type filters: boolean
"""
# TODO: after implementing this class, implement recipe.definition.node.ConditionNodeDefinition.__init__
if not filter_list:
filter_list = []
self.filter_list = filter_list
self.all = all
def add_filter(self, filter_dict):
"""Adds a filter definition
:param filter_dict: data filter to add
:type filter_dict: dict
:raises :class:`recipe.definition.exceptions.InvalidDataFilter`: If the filter is invalid
"""
filter_dict = DataFilter.validate_filter(filter_dict)
self.filter_list.append(filter_dict)
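    # A hypothetical filter definition accepted by add_filter/validate_filter;
    # only the four required keys are shown and 'input_file' is a made-up
    # parameter name:
    #
    #     data_filter.add_filter({'name': 'input_file',
    #                             'type': 'media-type',
    #                             'condition': '==',
    #                             'values': ['application/json']})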
def is_data_accepted(self, data):
"""Indicates whether the given data passes the filter or not
:param data: The data to check against the filter
:type data: :class:`data.data.data.Data`
:returns: True if the data is accepted, False if the data is denied
:rtype: bool
"""
success = True
for f in self.filter_list:
name = f['name']
filter_type = f['type']
cond = f['condition']
values = f['values']
filter_success = False
all_fields = False
            if 'all_fields' in f and f['all_fields']:
all_fields = True
all_files = False
if 'all_files' in f and f['all_files']:
all_files = True
if name in data.values:
param = data.values[name]
try:
if filter_type in {'filename', 'media-type', 'data-type'}:
if filter_type == 'filename':
file_values = [scale_file.file_name for scale_file in ScaleFile.objects.filter(id__in=param.file_ids)]
elif filter_type == 'media-type':
file_values = [scale_file.media_type for scale_file in ScaleFile.objects.filter(id__in=param.file_ids)]
elif filter_type == 'data-type':
list_of_lists = [scale_file.data_type_tags for scale_file in ScaleFile.objects.filter(id__in=param.file_ids)]
file_values = [item for sublist in list_of_lists for item in sublist]
# attempt to run condition on list, i.e. in case we're checking 'contains'
filter_success |= ALL_CONDITIONS[cond](file_values, values)
file_success = all_files
for value in file_values:
if all_files:
# attempt to run condition on individual items, if any fail we fail the filter
file_success &= ALL_CONDITIONS[cond](value, values)
else:
# attempt to run condition on individual items, if any succeed we pass the filter
file_success |= ALL_CONDITIONS[cond](value, values)
filter_success |= file_success
elif filter_type == 'meta-data':
meta_data_list = [scale_file.meta_data for scale_file in ScaleFile.objects.filter(id__in=param.file_ids)]
if 'fields' in f:
if len(f['fields']) != len(values):
logger.exception('Length of fields (%s) and values (%s) are not equal' % (f['fields'], values))
return False
file_success = all_files
for meta_data in meta_data_list:
field_success = all_fields
for field_path, value in zip(f['fields'], values):
item = _getNestedDictField(meta_data, field_path)
if all_fields:
# attempt to run condition on individual items, if any fail we fail the filter
field_success &= ALL_CONDITIONS[cond](item, value)
else:
# attempt to run condition on individual items, if any succeed we pass the filter
field_success |= ALL_CONDITIONS[cond](item, value)
if all_files:
file_success &= field_success
else:
file_success |= field_success
filter_success |= file_success
else:
filter_success |= ALL_CONDITIONS[cond](meta_data_list, values)
file_success = all_files
for item in meta_data_list:
if all_files:
# attempt to run condition on individual items, if any fail we fail the filter
file_success &= ALL_CONDITIONS[cond](item, values)
else:
# attempt to run condition on individual items, if any succeed we pass the filter
file_success |= ALL_CONDITIONS[cond](item, values)
filter_success |= file_success
elif filter_type == 'object':
if 'fields' in f:
if len(f['fields']) != len(values):
logger.exception('Length of fields (%s) and values (%s) are not equal' % (f['fields'], values))
return False
field_success = all_fields
for field_path, value in zip(f['fields'], values):
item = _getNestedDictField(param.value, field_path)
                                if all_fields:
                                    field_success &= ALL_CONDITIONS[cond](item, value)
                                else:
                                    field_success |= ALL_CONDITIONS[cond](item, value)
filter_success |= field_success
else:
filter_success |= ALL_CONDITIONS[cond](param.value, values)
else:
filter_success |= ALL_CONDITIONS[cond](param.value, values)
except AttributeError:
logger.error('Attempting to run file filter on json parameter or vice versa')
success = False
except KeyError:
logger.error('Condition %s does not exist' % cond)
success = False
except ScaleFile.DoesNotExist:
                logger.error('Attempting to run file filter on non-existent file(s): %s' % param.file_ids)
success = False
if filter_success and not self.all:
return True # One filter passed, so return True
if not filter_success and self.all:
return False # One filter failed, so return False
success &= filter_success
return success
def is_filter_equal(self, data_filter):
"""Indicates whether the given data filter is equal to this filter or not
:param data_filter: The data filter
:type data_filter: :class:`data.filter.filter.DataFilter`
:returns: True if the data filter is equal to this one, False otherwise
:rtype: bool
"""
equal = self.all == data_filter.all
equal &= self.filter_list == data_filter.filter_list
return equal
def validate(self, interface):
"""Validates this data filter against the given interface
:param interface: The interface describing the data that will be passed to the filter
:type interface: :class:`data.interface.interface.Interface`
:returns: A list of warnings discovered during validation
:rtype: :func:`list`
:raises :class:`data.filter.exceptions.InvalidDataFilter`: If the data filter is invalid
"""
warnings = []
unmatched = interface.parameters.keys()
for f in self.filter_list:
name = f['name']
filter_type = f['type']
if name in interface.parameters:
if name in unmatched:
unmatched.remove(name)
if interface.parameters[name].param_type == 'file' and filter_type not in FILE_TYPES:
raise InvalidDataFilter('MISMATCHED_TYPE', 'Interface parameter is a file type and requires a file type filter.')
if interface.parameters[name].param_type == 'json' and filter_type in FILE_TYPES:
raise InvalidDataFilter('MISMATCHED_TYPE', 'Interface parameter is a json type and will not work with a file type filter.')
if interface.parameters[name].param_type == 'json':
if interface.parameters[name].json_type in STRING_TYPES and filter_type not in STRING_TYPES:
raise InvalidDataFilter('MISMATCHED_TYPE', 'Interface parameter is a string and filter is not a string type filter')
if interface.parameters[name].json_type in NUMBER_TYPES and filter_type not in NUMBER_TYPES:
raise InvalidDataFilter('MISMATCHED_TYPE', 'Interface parameter is a number and filter is not a number type filter')
if interface.parameters[name].json_type in BOOL_TYPES and filter_type not in BOOL_TYPES:
                        raise InvalidDataFilter('MISMATCHED_TYPE', 'Interface parameter is a boolean and filter is not a boolean type filter')
json_type = interface.parameters[name].json_type
if json_type not in BOOL_TYPES and json_type not in STRING_TYPES and json_type not in NUMBER_TYPES:
raise InvalidDataFilter('MISMATCHED_TYPE', 'Interface parameter type is not supported by data filters')
else:
                warnings.append(ValidationWarning('UNMATCHED_FILTER',
                                                  'Filter with name \'%s\' does not have a matching parameter' % name))
if unmatched:
warnings.append(ValidationWarning('UNMATCHED_PARAMETERS', 'No matching filters for these parameters: \'%s\' ' % unmatched))
return warnings
@staticmethod
def validate_filter(filter_dict):
"""Validates a data filter dictionary
:param filter_dict: data filter to validate
:type filter_dict: dict
:raises :class:`recipe.definition.exceptions.InvalidDataFilter`: If the filter is invalid
:returns: Validated filter if the tests pass
:rtype: dict
"""
if 'name' not in filter_dict:
raise InvalidDataFilter('MISSING_NAME', 'Missing name for filter')
name = filter_dict['name']
if 'type' not in filter_dict:
raise InvalidDataFilter('MISSING_TYPE', 'Missing type for \'%s\'' % name)
if 'condition' not in filter_dict:
raise InvalidDataFilter('MISSING_CONDITION', 'Missing condition for \'%s\'' % name)
if 'values' not in filter_dict:
raise InvalidDataFilter('MISSING_VALUES', 'Missing values for \'%s\'' % name)
filter_type = filter_dict['type']
condition = filter_dict['condition']
values = filter_dict['values']
if condition not in ALL_CONDITIONS:
raise InvalidDataFilter('INVALID_CONDITION', 'Invalid condition \'%s\' for \'%s\'. Valid conditions are: %s'
% (condition, name, ALL_CONDITIONS))
if filter_type in STRING_TYPES and condition not in STRING_CONDITIONS:
raise InvalidDataFilter('INVALID_CONDITION', 'Invalid condition \'%s\' for \'%s\'. Valid conditions are: %s'
% (condition, name, STRING_CONDITIONS))
if filter_type in NUMBER_TYPES and condition not in NUMBER_CONDITIONS:
raise InvalidDataFilter('INVALID_CONDITION', 'Invalid condition \'%s\' for \'%s\'. Valid conditions are: %s'
% (condition, name, NUMBER_CONDITIONS))
if filter_type in BOOL_TYPES and condition not in BOOL_CONDITIONS:
raise InvalidDataFilter('INVALID_CONDITION', 'Invalid condition \'%s\' for \'%s\'. Valid conditions are: %s'
% (condition, name, BOOL_CONDITIONS))
if filter_type in OBJECT_TYPES and condition not in OBJECT_CONDITIONS:
if 'fields' not in filter_dict or not filter_dict['fields']:
msg = 'Object %s does not have object condition (%s) and fields property is not set'
raise InvalidDataFilter('INVALID_CONDITION', msg % (name, OBJECT_CONDITIONS))
if 'fields' in filter_dict:
if len(filter_dict['fields']) != len(values):
raise InvalidDataFilter('INVALID_FIELDS', 'Fields property must be same length as values')
if filter_type not in STRING_TYPES and filter_type not in NUMBER_TYPES and filter_type not in BOOL_TYPES and filter_type not in OBJECT_TYPES:
raise InvalidDataFilter('INVALID_TYPE', 'No valid conditions for this type')
filter_values = []
if filter_type == 'number':
for value in values:
try:
filter_values.append(float(value))
except ValueError:
raise InvalidDataFilter('VALUE_ERROR', 'Expected float for \'%s\', found %s' % (name, value))
elif filter_type == 'integer':
for value in values:
try:
filter_values.append(int(value))
except ValueError:
raise InvalidDataFilter('VALUE_ERROR', 'Expected int for \'%s\', found %s' % (name, value))
else:
filter_values.extend(values)
ret_val = copy.deepcopy(filter_dict)
ret_val['values'] = filter_values
return ret_val
|
ngageoint/scale
|
scale/data/filter/filter.py
|
Python
|
apache-2.0
| 21,387
|
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
import bcc
import ctypes
import errno
import os
import subprocess
import shutil
import time
import unittest
class TestUprobes(unittest.TestCase):
def test_simple_library(self):
text = """
#include <uapi/linux/ptrace.h>
BPF_ARRAY(stats, u64, 1);
static void incr(int idx) {
u64 *ptr = stats.lookup(&idx);
if (ptr)
++(*ptr);
}
int count(struct pt_regs *ctx) {
bpf_trace_printk("count() uprobe fired");
u32 pid = bpf_get_current_pid_tgid();
if (pid == PID)
incr(0);
return 0;
}"""
test_pid = os.getpid()
text = text.replace("PID", "%d" % test_pid)
b = bcc.BPF(text=text)
b.attach_uprobe(name="c", sym="malloc_stats", fn_name="count", pid=test_pid)
b.attach_uretprobe(name="c", sym="malloc_stats", fn_name="count", pid=test_pid)
libc = ctypes.CDLL("libc.so.6")
libc.malloc_stats.restype = None
libc.malloc_stats.argtypes = []
libc.malloc_stats()
self.assertEqual(b["stats"][ctypes.c_int(0)].value, 2)
b.detach_uretprobe(name="c", sym="malloc_stats", pid=test_pid)
b.detach_uprobe(name="c", sym="malloc_stats", pid=test_pid)
def test_simple_binary(self):
text = """
#include <uapi/linux/ptrace.h>
BPF_ARRAY(stats, u64, 1);
static void incr(int idx) {
u64 *ptr = stats.lookup(&idx);
if (ptr)
++(*ptr);
}
int count(struct pt_regs *ctx) {
u32 pid = bpf_get_current_pid_tgid();
incr(0);
return 0;
}"""
b = bcc.BPF(text=text)
b.attach_uprobe(name="/usr/bin/python", sym="main", fn_name="count")
b.attach_uretprobe(name="/usr/bin/python", sym="main", fn_name="count")
with os.popen("/usr/bin/python -V") as f:
pass
self.assertGreater(b["stats"][ctypes.c_int(0)].value, 0)
b.detach_uretprobe(name="/usr/bin/python", sym="main")
b.detach_uprobe(name="/usr/bin/python", sym="main")
def test_mount_namespace(self):
text = """
#include <uapi/linux/ptrace.h>
BPF_TABLE("array", int, u64, stats, 1);
static void incr(int idx) {
u64 *ptr = stats.lookup(&idx);
if (ptr)
++(*ptr);
}
int count(struct pt_regs *ctx) {
bpf_trace_printk("count() uprobe fired");
u32 pid = bpf_get_current_pid_tgid();
if (pid == PID)
incr(0);
return 0;
}"""
# Need to import libc from ctypes to access unshare(2)
libc = ctypes.CDLL("libc.so.6", use_errno=True)
# Need to find path to libz.so.1
libz_path = None
p = subprocess.Popen(["ldconfig", "-p"], stdout=subprocess.PIPE)
        for l in p.stdout:
            n = l.decode().split()
            if n[0] == "libz.so.1":
                libz_path = n[-1]
p.wait()
p = None
self.assertIsNotNone(libz_path)
# fork a child that we'll place in a separate mount namespace
child_pid = os.fork()
if child_pid == 0:
# Unshare CLONE_NEWNS
if libc.unshare(0x00020000) == -1:
e = ctypes.get_errno()
raise OSError(e, errno.errorcode[e])
# Remount root MS_REC|MS_PRIVATE
if libc.mount(None, "/", None, (1<<14)|(1<<18) , None) == -1:
e = ctypes.get_errno()
raise OSError(e, errno.errorcode[e])
if libc.mount("tmpfs", "/tmp", "tmpfs", 0, None) == -1:
e = ctypes.get_errno()
raise OSError(e, errno.errorcode[e])
shutil.copy(libz_path, "/tmp")
libz = ctypes.CDLL("/tmp/libz.so.1")
time.sleep(1)
libz.zlibVersion()
time.sleep(5)
os._exit(0)
libname = "/tmp/libz.so.1"
symname = "zlibVersion"
text = text.replace("PID", "%d" % child_pid)
b = bcc.BPF(text=text)
b.attach_uprobe(name=libname, sym=symname, fn_name="count", pid=child_pid)
b.attach_uretprobe(name=libname, sym=symname, fn_name="count", pid=child_pid)
time.sleep(1)
self.assertEqual(b["stats"][ctypes.c_int(0)].value, 2)
b.detach_uretprobe(name=libname, sym=symname, pid=child_pid)
b.detach_uprobe(name=libname, sym=symname, pid=child_pid)
os.wait()
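
# --- Editor's illustrative note (not part of the original test) ---
# The ldconfig parsing above is one way to locate libz. A shorter alternative,
# sketched here under the assumption that only the soname (not a full path) is
# needed, is ctypes.util.find_library, which typically returns "libz.so.1" on
# Linux; it could not replace the copy-into-tmpfs step above as written,
# because that step needs a concrete file path.
def _find_libz_soname_sketch():
    import ctypes.util
    return ctypes.util.find_library("z")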
if __name__ == "__main__":
unittest.main()
|
shodoco/bcc
|
tests/python/test_uprobes.py
|
Python
|
apache-2.0
| 4,406
|
# Copyright (c) 2019 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""PowerMax backend for the Dell EMC Manila driver."""
import copy
import random
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import units
from manila.common import constants as const
from manila import exception
from manila.i18n import _
from manila.share.drivers.dell_emc.common.enas import constants
from manila.share.drivers.dell_emc.common.enas import utils as enas_utils
from manila.share.drivers.dell_emc.plugins import base as driver
from manila.share.drivers.dell_emc.plugins.powermax import (
object_manager as manager)
from manila.share import utils as share_utils
from manila import utils
"""Version history:
1.0.0 - Initial version
2.0.0 - Implement IPv6 support
3.0.0 - Rebranding to PowerMax
    3.1.0 - Access host details fix prevents read-only share mounts
            (bug #1845147)
    3.2.0 - Fix wrong format of export locations (bug #1871999)
3.3.0 - Victoria release
3.4.0 - Wallaby release
3.5.0 - Xena release
"""
VERSION = "3.5.0"
LOG = log.getLogger(__name__)
POWERMAX_OPTS = [
cfg.StrOpt('powermax_server_container',
help='Data mover to host the NAS server.'),
cfg.ListOpt('powermax_share_data_pools',
help='Comma separated list of pools that can be used to '
'persist share data.'),
cfg.ListOpt('powermax_ethernet_ports',
help='Comma separated list of ports that can be used for '
'share server interfaces. Members of the list '
'can be Unix-style glob expressions.')
]
CONF = cfg.CONF
CONF.register_opts(POWERMAX_OPTS)
@enas_utils.decorate_all_methods(enas_utils.log_enter_exit,
debug_only=True)
class PowerMaxStorageConnection(driver.StorageConnection):
"""Implements powermax specific functionality for Dell EMC Manila driver.
"""
@enas_utils.log_enter_exit
def __init__(self, *args, **kwargs):
super(PowerMaxStorageConnection, self).__init__(*args, **kwargs)
if 'configuration' in kwargs:
kwargs['configuration'].append_config_values(POWERMAX_OPTS)
self.mover_name = None
self.pools = None
self.manager = None
self.pool_conf = None
self.reserved_percentage = None
self.reserved_snapshot_percentage = None
self.driver_handles_share_servers = True
self.port_conf = None
self.ipv6_implemented = True
self.dhss_mandatory_security_service_association = {
'nfs': None,
'cifs': ['active_directory', ]
}
def create_share(self, context, share, share_server=None):
"""Create a share and export it based on protocol used."""
share_name = share['id']
size = share['size'] * units.Ki
share_proto = share['share_proto'].upper()
# Validate the share protocol
if share_proto not in ('NFS', 'CIFS'):
raise exception.InvalidShare(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
# Get the pool name from share host field
pool_name = share_utils.extract_host(share['host'], level='pool')
if not pool_name:
message = (_("Pool is not available in the share host %s.") %
share['host'])
raise exception.InvalidHost(reason=message)
# Validate share server
self._share_server_validation(share_server)
if share_proto == 'CIFS':
vdm_name = self._get_share_server_name(share_server)
server_name = vdm_name
# Check if CIFS server exists.
status, server = self._get_context('CIFSServer').get(server_name,
vdm_name)
if status != constants.STATUS_OK:
message = (_("CIFS server %s not found.") % server_name)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
self._allocate_container(share_name, size, share_server, pool_name)
if share_proto == 'NFS':
location = self._create_nfs_share(share_name, share_server)
elif share_proto == 'CIFS':
location = self._create_cifs_share(share_name, share_server)
return [
{'path': location}
]
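    # Editor's note (illustrative, values are assumptions): the export
    # location returned above is protocol specific, e.g.
    #   NFS:  [{'path': '192.0.2.10:/share-id'}]
    #   CIFS: [{'path': '\\\\192.0.2.10\\share-id'}]
    # where the address comes from the share server's backend details.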
def _share_server_validation(self, share_server):
"""Validate the share server."""
if not share_server:
msg = _('Share server not provided')
raise exception.InvalidInput(reason=msg)
backend_details = share_server.get('backend_details')
vdm = backend_details.get(
'share_server_name') if backend_details else None
if vdm is None:
message = _("No share server found.")
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
def _allocate_container(self, share_name, size, share_server, pool_name):
"""Allocate file system for share."""
vdm_name = self._get_share_server_name(share_server)
self._get_context('FileSystem').create(
share_name, size, pool_name, vdm_name)
def _allocate_container_from_snapshot(self, share, snapshot, share_server,
pool_name):
"""Allocate file system from snapshot."""
vdm_name = self._get_share_server_name(share_server)
interconn_id = self._get_context('Mover').get_interconnect_id(
self.mover_name, self.mover_name)
self._get_context('FileSystem').create_from_snapshot(
share['id'], snapshot['id'], snapshot['share_id'],
pool_name, vdm_name, interconn_id)
        new_size = share['size'] * units.Ki
        self._get_context('FileSystem').extend(share['id'], pool_name,
                                               new_size)
@enas_utils.log_enter_exit
def _create_cifs_share(self, share_name, share_server):
"""Create CIFS share."""
vdm_name = self._get_share_server_name(share_server)
server_name = vdm_name
# Get available CIFS Server and interface (one CIFS server per VDM)
status, server = self._get_context('CIFSServer').get(server_name,
vdm_name)
if 'interfaces' not in server or len(server['interfaces']) == 0:
message = (_("CIFS server %s doesn't have interface, "
"so the share is inaccessible.")
% server['compName'])
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
interface = enas_utils.export_unc_path(server['interfaces'][0])
self._get_context('CIFSShare').create(share_name, server['name'],
vdm_name)
self._get_context('CIFSShare').disable_share_access(share_name,
vdm_name)
location = (r'\\%(interface)s\%(name)s' %
{'interface': interface, 'name': share_name})
return location
@enas_utils.log_enter_exit
def _create_nfs_share(self, share_name, share_server):
"""Create NFS share."""
vdm_name = self._get_share_server_name(share_server)
self._get_context('NFSShare').create(share_name, vdm_name)
nfs_if = enas_utils.convert_ipv6_format_if_needed(
share_server['backend_details']['nfs_if'])
return ('%(nfs_if)s:/%(share_name)s'
% {'nfs_if': nfs_if,
'share_name': share_name})
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
share_name = share['id']
share_proto = share['share_proto'].upper()
# Validate the share protocol
if share_proto not in ('NFS', 'CIFS'):
raise exception.InvalidShare(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
# Get the pool name from share host field
pool_name = share_utils.extract_host(share['host'], level='pool')
if not pool_name:
message = (_("Pool is not available in the share host %s.") %
share['host'])
raise exception.InvalidHost(reason=message)
self._share_server_validation(share_server)
self._allocate_container_from_snapshot(
share, snapshot, share_server, pool_name)
nfs_if = enas_utils.convert_ipv6_format_if_needed(
share_server['backend_details']['nfs_if'])
if share_proto == 'NFS':
self._create_nfs_share(share_name, share_server)
location = ('%(nfs_if)s:/%(share_name)s'
% {'nfs_if': nfs_if,
'share_name': share_name})
elif share_proto == 'CIFS':
location = self._create_cifs_share(share_name, share_server)
return [
{'path': location}
]
def create_snapshot(self, context, snapshot, share_server=None):
"""Create snapshot from share."""
share_name = snapshot['share_id']
status, filesystem = self._get_context('FileSystem').get(share_name)
if status != constants.STATUS_OK:
message = (_("File System %s not found.") % share_name)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
pool_id = filesystem['pools_id'][0]
self._get_context('Snapshot').create(snapshot['id'],
snapshot['share_id'],
pool_id)
def delete_share(self, context, share, share_server=None):
"""Delete a share."""
if share_server is None:
LOG.warning("Share network should be specified for "
"share deletion.")
return
share_proto = share['share_proto'].upper()
if share_proto == 'NFS':
self._delete_nfs_share(share, share_server)
elif share_proto == 'CIFS':
self._delete_cifs_share(share, share_server)
else:
raise exception.InvalidShare(
reason=_('Unsupported share protocol'))
@enas_utils.log_enter_exit
def _delete_cifs_share(self, share, share_server):
"""Delete CIFS share."""
vdm_name = self._get_share_server_name(share_server)
name = share['id']
self._get_context('CIFSShare').delete(name, vdm_name)
self._deallocate_container(name, vdm_name)
@enas_utils.log_enter_exit
def _delete_nfs_share(self, share, share_server):
"""Delete NFS share."""
vdm_name = self._get_share_server_name(share_server)
name = share['id']
self._get_context('NFSShare').delete(name, vdm_name)
self._deallocate_container(name, vdm_name)
@enas_utils.log_enter_exit
def _deallocate_container(self, share_name, vdm_name):
"""Delete underneath objects of the share."""
path = '/' + share_name
try:
# Delete mount point
self._get_context('MountPoint').delete(path, vdm_name)
except exception.EMCPowerMaxXMLAPIError as e:
LOG.exception("CIFS server %(name)s on mover %(mover_name)s "
"not found due to error %(err)s. Skip the "
"deletion.",
{'name': path, 'mover_name': vdm_name,
'err': e.message})
try:
# Delete file system
self._get_context('FileSystem').delete(share_name)
except exception.EMCPowerMaxXMLAPIError as e:
LOG.exception("File system %(share_name)s not found due to "
"error %(err)s. Skip the deletion.",
{'share_name': share_name,
'err': e.message})
def delete_snapshot(self, context, snapshot, share_server=None):
"""Delete a snapshot."""
self._get_context('Snapshot').delete(snapshot['id'])
def ensure_share(self, context, share, share_server=None):
"""Ensure that the share is exported."""
def extend_share(self, share, new_size, share_server=None):
# Get the pool name from share host field
pool_name = share_utils.extract_host(share['host'], level='pool')
if not pool_name:
message = (_("Pool is not available in the share host %s.") %
share['host'])
raise exception.InvalidHost(reason=message)
share_name = share['id']
self._get_context('FileSystem').extend(
share_name, pool_name, new_size * units.Ki)
def allow_access(self, context, share, access, share_server=None):
"""Allow access to a share."""
access_level = access['access_level']
if access_level not in const.ACCESS_LEVELS:
raise exception.InvalidShareAccessLevel(level=access_level)
share_proto = share['share_proto']
if share_proto == 'NFS':
self._nfs_allow_access(context, share, access, share_server)
elif share_proto == 'CIFS':
self._cifs_allow_access(context, share, access, share_server)
else:
raise exception.InvalidShare(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
@enas_utils.log_enter_exit
def _cifs_allow_access(self, context, share, access, share_server):
"""Allow access to CIFS share."""
vdm_name = self._get_share_server_name(share_server)
share_name = share['id']
if access['access_type'] != 'user':
reason = _('Only user access type allowed for CIFS share')
raise exception.InvalidShareAccess(reason=reason)
user_name = access['access_to']
access_level = access['access_level']
if access_level == const.ACCESS_LEVEL_RW:
cifs_access = constants.CIFS_ACL_FULLCONTROL
else:
cifs_access = constants.CIFS_ACL_READ
# Check if CIFS server exists.
server_name = vdm_name
status, server = self._get_context('CIFSServer').get(server_name,
vdm_name)
if status != constants.STATUS_OK:
message = (_("CIFS server %s not found.") % server_name)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
self._get_context('CIFSShare').allow_share_access(
vdm_name,
share_name,
user_name,
server['domain'],
access=cifs_access)
@enas_utils.log_enter_exit
def _nfs_allow_access(self, context, share, access, share_server):
"""Allow access to NFS share."""
vdm_name = self._get_share_server_name(share_server)
access_type = access['access_type']
if access_type != 'ip':
reason = _('Only ip access type allowed.')
raise exception.InvalidShareAccess(reason=reason)
host_ip = access['access_to']
access_level = access['access_level']
self._get_context('NFSShare').allow_share_access(
share['id'], host_ip, vdm_name, access_level)
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
# deleting rules
for rule in delete_rules:
self.deny_access(context, share, rule, share_server)
# adding rules
for rule in add_rules:
self.allow_access(context, share, rule, share_server)
# recovery mode
if not (add_rules or delete_rules):
white_list = []
for rule in access_rules:
self.allow_access(context, share, rule, share_server)
white_list.append(
enas_utils.convert_ipv6_format_if_needed(
rule['access_to']))
self.clear_access(share, share_server, white_list)
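        # Editor's note: when update_access is called with empty add_rules and
        # delete_rules (recovery mode), every rule in access_rules is
        # re-applied and clear_access() then removes any backend entries not
        # in the rebuilt white list, so the backend converges to exactly the
        # rules Manila knows about.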
def clear_access(self, share, share_server, white_list):
share_proto = share['share_proto'].upper()
share_name = share['id']
if share_proto == 'CIFS':
self._cifs_clear_access(share_name, share_server, white_list)
elif share_proto == 'NFS':
self._nfs_clear_access(share_name, share_server, white_list)
@enas_utils.log_enter_exit
def _cifs_clear_access(self, share_name, share_server, white_list):
"""Clear access for CIFS share except hosts in the white list."""
vdm_name = self._get_share_server_name(share_server)
# Check if CIFS server exists.
server_name = vdm_name
status, server = self._get_context('CIFSServer').get(server_name,
vdm_name)
if status != constants.STATUS_OK:
message = (_("CIFS server %(server_name)s has issue. "
"Detail: %(status)s") %
{'server_name': server_name, 'status': status})
raise exception.EMCPowerMaxXMLAPIError(err=message)
self._get_context('CIFSShare').clear_share_access(
share_name=share_name,
mover_name=vdm_name,
domain=server['domain'],
white_list_users=white_list)
@enas_utils.log_enter_exit
def _nfs_clear_access(self, share_name, share_server, white_list):
"""Clear access for NFS share except hosts in the white list."""
self._get_context('NFSShare').clear_share_access(
share_name=share_name,
mover_name=self._get_share_server_name(share_server),
white_list_hosts=white_list)
def deny_access(self, context, share, access, share_server=None):
"""Deny access to a share."""
share_proto = share['share_proto']
if share_proto == 'NFS':
self._nfs_deny_access(share, access, share_server)
elif share_proto == 'CIFS':
self._cifs_deny_access(share, access, share_server)
else:
raise exception.InvalidShare(
reason=_('Unsupported share protocol'))
@enas_utils.log_enter_exit
def _cifs_deny_access(self, share, access, share_server):
"""Deny access to CIFS share."""
vdm_name = self._get_share_server_name(share_server)
share_name = share['id']
if access['access_type'] != 'user':
LOG.warning("Only user access type allowed for CIFS share.")
return
user_name = access['access_to']
access_level = access['access_level']
if access_level == const.ACCESS_LEVEL_RW:
cifs_access = constants.CIFS_ACL_FULLCONTROL
else:
cifs_access = constants.CIFS_ACL_READ
# Check if CIFS server exists.
server_name = vdm_name
status, server = self._get_context('CIFSServer').get(server_name,
vdm_name)
if status != constants.STATUS_OK:
message = (_("CIFS server %s not found.") % server_name)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
self._get_context('CIFSShare').deny_share_access(
vdm_name,
share_name,
user_name,
server['domain'],
access=cifs_access)
@enas_utils.log_enter_exit
def _nfs_deny_access(self, share, access, share_server):
"""Deny access to NFS share."""
vdm_name = self._get_share_server_name(share_server)
access_type = access['access_type']
if access_type != 'ip':
LOG.warning("Only ip access type allowed.")
return
host_ip = enas_utils.convert_ipv6_format_if_needed(access['access_to'])
self._get_context('NFSShare').deny_share_access(share['id'], host_ip,
vdm_name)
def check_for_setup_error(self):
"""Check for setup error."""
# To verify the input from Manila configuration
status, out = self._get_context('Mover').get_ref(self.mover_name,
True)
if constants.STATUS_ERROR == status:
message = (_("Could not find Data Mover by name: %s.") %
self.mover_name)
LOG.error(message)
raise exception.InvalidParameterValue(err=message)
self.pools = self._get_managed_storage_pools(self.pool_conf)
def _get_managed_storage_pools(self, pools):
matched_pools = set()
if pools:
# Get the real pools from the backend storage
status, backend_pools = self._get_context('StoragePool').get_all()
if status != constants.STATUS_OK:
message = (_("Failed to get storage pool information. "
"Reason: %s") % backend_pools)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
            real_pools = set(backend_pools)
conf_pools = set([item.strip() for item in pools])
matched_pools, unmatched_pools = enas_utils.do_match_any(
real_pools, conf_pools)
if not matched_pools:
msg = (_("None of the specified storage pools to be managed "
"exist. Please check your configuration "
"emc_nas_pool_names in manila.conf. "
"The available pools in the backend are %s.") %
",".join(real_pools))
raise exception.InvalidParameterValue(err=msg)
LOG.info("Storage pools: %s will be managed.",
",".join(matched_pools))
else:
LOG.debug("No storage pool is specified, so all pools "
"in storage system will be managed.")
return matched_pools
def connect(self, emc_share_driver, context):
"""Connect to PowerMax NAS server."""
config = emc_share_driver.configuration
config.append_config_values(POWERMAX_OPTS)
self.mover_name = config.safe_get('powermax_server_container')
self.pool_conf = config.safe_get('powermax_share_data_pools')
self.reserved_percentage = config.safe_get('reserved_share_percentage')
if self.reserved_percentage is None:
self.reserved_percentage = 0
self.reserved_snapshot_percentage = config.safe_get(
'reserved_share_from_snapshot_percentage')
if self.reserved_snapshot_percentage is None:
self.reserved_snapshot_percentage = self.reserved_percentage
self.manager = manager.StorageObjectManager(config)
self.port_conf = config.safe_get('powermax_ethernet_ports')
def get_managed_ports(self):
# Get the real ports(devices) list from the backend storage
real_ports = self._get_physical_devices(self.mover_name)
if not self.port_conf:
LOG.debug("No ports are specified, so any of the ports on the "
"Data Mover can be used.")
return real_ports
matched_ports, unmanaged_ports = enas_utils.do_match_any(
real_ports, self.port_conf)
if not matched_ports:
msg = (_("None of the specified network ports exist. "
"Please check your configuration powermax_ethernet_ports "
"in manila.conf. The available ports on the Data Mover "
"are %s.") %
",".join(real_ports))
raise exception.BadConfigurationException(reason=msg)
LOG.debug("Ports: %s can be used.", ",".join(matched_ports))
return list(matched_ports)
def update_share_stats(self, stats_dict):
"""Communicate with EMCNASClient to get the stats."""
stats_dict['driver_version'] = VERSION
self._get_context('Mover').get_ref(self.mover_name, True)
stats_dict['pools'] = []
status, pools = self._get_context('StoragePool').get_all()
for name, pool in pools.items():
if not self.pools or pool['name'] in self.pools:
total_size = float(pool['total_size'])
used_size = float(pool['used_size'])
pool_stat = {
'pool_name': pool['name'],
'total_capacity_gb': enas_utils.mb_to_gb(total_size),
'free_capacity_gb':
enas_utils.mb_to_gb(total_size - used_size),
'qos': False,
'reserved_percentage': self.reserved_percentage,
'reserved_snapshot_percentage':
self.reserved_snapshot_percentage,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': False,
'ipv6_support': True
}
stats_dict['pools'].append(pool_stat)
if not stats_dict['pools']:
message = _("Failed to update storage pool.")
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
def get_pool(self, share):
"""Get the pool name of the share."""
share_name = share['id']
status, filesystem = self._get_context('FileSystem').get(share_name)
if status != constants.STATUS_OK:
message = (_("File System %(name)s not found. "
"Reason: %(err)s") %
{'name': share_name, 'err': filesystem})
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
pool_id = filesystem['pools_id'][0]
# Get the real pools from the backend storage
status, backend_pools = self._get_context('StoragePool').get_all()
if status != constants.STATUS_OK:
message = (_("Failed to get storage pool information. "
"Reason: %s") % backend_pools)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
for name, pool_info in backend_pools.items():
if pool_info['id'] == pool_id:
return name
        available_pools = list(backend_pools)
message = (_("No matched pool name for share: %(share)s. "
"Available pools: %(pools)s") %
{'share': share_name, 'pools': available_pools})
raise exception.EMCPowerMaxXMLAPIError(err=message)
def get_network_allocations_number(self):
"""Returns number of network allocations for creating VIFs."""
return constants.IP_ALLOCATIONS
def setup_server(self, network_info, metadata=None):
"""Set up and configure share server.
Sets up and configures share server with given network parameters.
"""
# Only support single security service with type 'active_directory'
vdm_name = network_info['server_id']
vlan_id = network_info['segmentation_id']
active_directory = None
allocated_interfaces = []
if network_info.get('security_services'):
is_valid, active_directory = self._get_valid_security_service(
network_info['security_services'])
if not is_valid:
raise exception.EMCPowerMaxXMLAPIError(err=active_directory)
try:
if not self._vdm_exist(vdm_name):
LOG.debug('Share server %s not found, creating '
'share server...', vdm_name)
self._get_context('VDM').create(vdm_name, self.mover_name)
devices = self.get_managed_ports()
for net_info in network_info['network_allocations']:
random.shuffle(devices)
ip_version = net_info['ip_version']
interface = {
'name': net_info['id'][-12:],
'device_name': devices[0],
'ip': net_info['ip_address'],
'mover_name': self.mover_name,
'vlan_id': vlan_id if vlan_id else -1,
}
if ip_version == 6:
interface['ip_version'] = ip_version
interface['net_mask'] = str(
utils.cidr_to_prefixlen(
network_info['cidr']))
else:
interface['net_mask'] = utils.cidr_to_netmask(
network_info['cidr'])
self._get_context('MoverInterface').create(interface)
allocated_interfaces.append(interface)
cifs_interface = allocated_interfaces[0]
nfs_interface = allocated_interfaces[1]
if active_directory:
self._configure_active_directory(
active_directory, vdm_name, cifs_interface)
self._get_context('VDM').attach_nfs_interface(
vdm_name, nfs_interface['name'])
return {
'share_server_name': vdm_name,
'cifs_if': cifs_interface['ip'],
'nfs_if': nfs_interface['ip'],
}
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Could not setup server')
server_details = self._construct_backend_details(
vdm_name, allocated_interfaces)
self.teardown_server(
server_details, network_info['security_services'])
def _construct_backend_details(self, vdm_name, interfaces):
if_number = len(interfaces)
cifs_if = interfaces[0]['ip'] if if_number > 0 else None
nfs_if = interfaces[1]['ip'] if if_number > 1 else None
return {
'share_server_name': vdm_name,
'cifs_if': cifs_if,
'nfs_if': nfs_if,
}
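    # Editor's note (illustrative, addresses are assumptions): the backend
    # details built above look like
    #   {'share_server_name': 'vdm-0f3a',
    #    'cifs_if': '192.0.2.21', 'nfs_if': '192.0.2.22'}
    # and are read back later by _get_share_server_name() and the share
    # create/delete paths.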
@enas_utils.log_enter_exit
def _vdm_exist(self, name):
status, out = self._get_context('VDM').get(name)
if constants.STATUS_OK != status:
return False
return True
def _get_physical_devices(self, mover_name):
"""Get a proper network device to create interface."""
devices = self._get_context('Mover').get_physical_devices(mover_name)
if not devices:
message = (_("Could not get physical device port on mover %s.") %
self.mover_name)
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
return devices
def _configure_active_directory(
self, security_service, vdm_name, interface):
domain = security_service['domain']
server = security_service['dns_ip']
self._get_context('DNSDomain').create(self.mover_name, domain, server)
cifs_server_args = {
'name': vdm_name,
'interface_ip': interface['ip'],
'domain_name': security_service['domain'],
'user_name': security_service['user'],
'password': security_service['password'],
'mover_name': vdm_name,
'is_vdm': True,
}
self._get_context('CIFSServer').create(cifs_server_args)
def teardown_server(self, server_details, security_services=None):
"""Teardown share server."""
if not server_details:
LOG.debug('Server details are empty.')
return
vdm_name = server_details.get('share_server_name')
if not vdm_name:
LOG.debug('No share server found in server details.')
return
cifs_if = server_details.get('cifs_if')
nfs_if = server_details.get('nfs_if')
status, vdm = self._get_context('VDM').get(vdm_name)
if constants.STATUS_OK != status:
LOG.debug('Share server %s not found.', vdm_name)
return
interfaces = self._get_context('VDM').get_interfaces(vdm_name)
for if_name in interfaces['nfs']:
self._get_context('VDM').detach_nfs_interface(vdm_name, if_name)
if security_services:
# Only support single security service with type 'active_directory'
is_valid, active_directory = self._get_valid_security_service(
security_services)
if is_valid:
status, servers = self._get_context('CIFSServer').get_all(
vdm_name)
if constants.STATUS_OK != status:
LOG.error('Could not find CIFS server by name: %s.',
vdm_name)
else:
cifs_servers = copy.deepcopy(servers)
for name, server in cifs_servers.items():
# Unjoin CIFS Server from domain
cifs_server_args = {
'name': server['name'],
'join_domain': False,
'user_name': active_directory['user'],
'password': active_directory['password'],
'mover_name': vdm_name,
'is_vdm': True,
}
try:
self._get_context('CIFSServer').modify(
cifs_server_args)
except exception.EMCPowerMaxXMLAPIError as expt:
LOG.debug("Failed to modify CIFS server "
"%(server)s. Reason: %(err)s.",
{'server': server, 'err': expt})
self._get_context('CIFSServer').delete(name, vdm_name)
# Delete interface from Data Mover
if cifs_if:
self._get_context('MoverInterface').delete(cifs_if,
self.mover_name)
if nfs_if:
self._get_context('MoverInterface').delete(nfs_if,
self.mover_name)
# Delete Virtual Data Mover
self._get_context('VDM').delete(vdm_name)
def _get_valid_security_service(self, security_services):
"""Validate security services and return a supported security service.
:param security_services:
:returns: (<is_valid>, <data>) -- <is_valid> is true to indicate
security_services includes zero or single security service for
active directory. Otherwise, it would return false. <data> return
error message when <is_valid> is false. Otherwise, it will
return zero or single security service for active directory.
"""
# Only support single security service with type 'active_directory'
if (len(security_services) > 1 or
(security_services and
security_services[0]['type'] != 'active_directory')):
return False, _("Unsupported security services. "
"Only support single security service and "
"only support type 'active_directory'")
return True, security_services[0]
def _get_share_server_name(self, share_server):
try:
return share_server['backend_details']['share_server_name']
except Exception:
LOG.debug("Didn't get share server name from share_server %s.",
share_server)
return share_server['id']
def _get_context(self, context_type):
return self.manager.getStorageContext(context_type)
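

# --- Editor's illustrative sketch (not part of the original driver) ---
# powermax_ethernet_ports and powermax_share_data_pools accept Unix-style glob
# expressions (see the option help above). enas_utils.do_match_any is not
# shown in this file, so the standalone sketch below only approximates that
# matching with fnmatch; the helper name and exact semantics are assumptions.
def _glob_match_sketch(real_items, patterns):
    """Return (matched_items, unmatched_patterns) for glob-style patterns."""
    import fnmatch
    matched = set()
    unmatched = set()
    for pattern in patterns:
        hits = set(fnmatch.filter(real_items, pattern))
        if hits:
            matched |= hits
        else:
            unmatched.add(pattern)
    return matched, unmatched

# Example:
#   _glob_match_sketch(['cge-1-0', 'cge-1-1', 'cge-2-0'], ['cge-1-*'])
#   -> ({'cge-1-0', 'cge-1-1'}, set())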
|
openstack/manila
|
manila/share/drivers/dell_emc/plugins/powermax/connection.py
|
Python
|
apache-2.0
| 36,844
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities to parse and transform input args structures
"""
from __future__ import absolute_import
import sys
import os
import datetime
import bigml.api
import bigmler.utils as u
from bigml.multivote import COMBINATION_WEIGHTS, COMBINER_MAP
from bigml.tree import LAST_PREDICTION, PROPORTIONAL
from bigmler.resources import ADD_REMOVE_PREFIX
from bigmler.prediction import FULL_FORMAT, COMBINATION, COMBINATION_LABEL
from bigmler.train_reader import AGGREGATES
from bigmler.utils import FILE_ENCODING, PYTHON3
if PYTHON3:
from io import StringIO
else:
from StringIO import StringIO
# Date and time in format SunNov0412_120510 to name and tag resources
NOW = datetime.datetime.now().strftime("%a%b%d%y_%H%M%S")
MISSING_STRATEGIES = {'last': LAST_PREDICTION, 'proportional': PROPORTIONAL}
DEFAULT_DESCRIPTION = "Created using BigMLer"
RESOURCE_TYPES = ["source", "dataset", "model", "ensemble", "batch_prediction",
"cluster", "centroid", "batch_centroid", "anomaly",
"anomaly_score", "batch_anomaly_score"]
def has_test(args):
"""Returns if some kind of test data is given in args.
"""
return (args.test_set or args.test_source or args.test_dataset or
args.test_stdin or args.test_datasets)
def has_train(args):
"""Returns if some kind of train data is given in args.
"""
return (args.training_set or args.source or args.dataset or
args.datasets or args.source_file or args.dataset_file or
args.train_stdin)
def has_model(args):
"""Boolean that is set when any model option is used
"""
return (args.model or args.models or args.model_tag or args.model_file
or args.ensemble or args.ensembles or args.ensemble_tag
or args.ensemble_file)
def has_anomaly(args):
"""Boolean that is set when any anomaly option is used
"""
return (args.anomaly or args.anomalies or args.anomaly_tag or
args.anomaly_file)
def non_compatible(args, option):
"""Return non_compatible options
"""
if option == '--cross-validation-rate':
return (args.test_set or args.evaluate or args.model or args.models or
args.model_tag or args.multi_label)
if option == '--max-categories':
return args.evaluate or args.test_split or args.remote
return False
def get_flags(args):
"""Returns the options set in the command line string normalized ("_" is
transformed to "-")
"""
train_stdin = False
test_stdin = False
flags = []
for i in range(0, len(args)):
if args[i].startswith("--"):
flag = args[i]
# syntax --flag=value
value = None
if "=" in flag:
value = args[i][flag.index("="):]
args[i] = args[i][0: flag.index("=")]
args[i] = args[i].replace("_", "-")
flag = args[i]
if value:
args[i] = "%s%s" % (flag, value)
flags.append(flag)
if (flag == '--train' and (
i == len(args) - 1 or args[i + 1].startswith("--"))):
train_stdin = True
elif (flag == '--test' and (
i == len(args) - 1 or args[i + 1].startswith("--"))):
test_stdin = True
return flags, train_stdin, test_stdin
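# Editor's note (illustrative): for a command line such as
#   ['--train', 'data/iris.csv', '--max_categories=3', '--test']
# get_flags() normalizes '--max_categories=3' to '--max-categories=3',
# returns flags ['--train', '--max-categories', '--test'] and, because
# '--test' has no value after it, sets test_stdin to True while train_stdin
# stays False.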
def get_command_message(args):
"""Rebuilds the command message based on the args list
"""
literal_args = args[:]
for i in range(0, len(args)):
# quoting literals with blanks: 'petal length'
if ' ' in args[i]:
prefix = ""
literal = args[i]
# literals with blanks after "+" or "-": +'petal length'
if args[i][0] in ADD_REMOVE_PREFIX:
prefix = args[i][0]
literal = args[i][1:]
literal_args[i] = '"%s%s"' % (prefix, literal)
return u"bigmler %s\n" % u" ".join(literal_args)
def parse_and_check(command):
"""Parses and checks the given args
"""
parser = command.parser
args = command.args
command_args = parser.parse_args(args)
command_args.train_stdin = command.train_stdin
command_args.test_stdin = command.test_stdin
# Checks options' compatibility
try:
if command_args.cross_validation_rate > 0 and (
non_compatible(command_args, '--cross-validation-rate')):
parser.error("Non compatible flags: --cross-validation-rate"
" cannot be used with --evaluate, --model,"
" --models, --model-tag or --multi-label. Usage:\n\n"
"bigmler --train data/iris.csv "
"--cross-validation-rate 0.1")
except AttributeError:
pass
try:
if command_args.max_categories and (
non_compatible(command_args, '--max-categories')):
parser.error("Non compatible flags: --max-categories cannot "
"be used with --test-split, --remote or --evaluate.")
except AttributeError:
pass
try:
if command_args.train_stdin and command_args.multi_label:
parser.error("Reading multi-label training sets from stream "
"is not yet available.")
except AttributeError:
pass
try:
if command_args.test_stdin and command_args.resume:
parser.error("Can't resume when using stream reading test sets.")
except AttributeError:
pass
try:
if (command_args.evaluate and not has_train(command_args) and
not (has_test(command_args) or command_args.test_split) and
has_model(command_args)):
parser.error("Evaluation wrong syntax.\n"
"\nTry for instance:\n\nbigmler --train data/iris.csv"
" --evaluate\nbigmler --model "
"model/5081d067035d076151000011 --dataset "
"dataset/5081d067035d076151003423 --evaluate\n"
"bigmler --ensemble ensemble/5081d067035d076151003443"
" --dataset "
"dataset/5081d067035d076151003423 --evaluate")
except AttributeError:
pass
try:
command_args.label_aggregates_list = []
if command_args.label_aggregates:
label_aggregates = command_args.label_aggregates.strip().lower()
label_aggregates = label_aggregates.split(
command_args.args_separator)
for aggregate in label_aggregates:
                if aggregate not in AGGREGATES:
parser.error("Wrong value for the --label-aggregates "
"option. The allowed values are count, first "
"and last.")
command_args.label_aggregates_list.append(aggregate)
except AttributeError:
pass
return command_args
def get_api_instance(command_args, storage_path):
"""Returns an api instance using the given parameters
"""
api_command_args = {
'username': command_args.username,
'api_key': command_args.api_key,
'dev_mode': command_args.dev_mode,
'debug': command_args.debug}
if command_args.store:
api_command_args.update({'storage': storage_path})
return bigml.api.BigML(**api_command_args)
def get_output_args(api, command_args, resume):
"""Returns the output args needed for the main bigmler computation process
"""
try:
if command_args.train_stdin:
if command_args.test_stdin:
sys.exit("The standard input can't be used both for training "
"and testing. Choose one of them")
command_args.training_set = StringIO(sys.stdin.read())
elif command_args.test_stdin:
command_args.test_set = StringIO(sys.stdin.read())
except AttributeError:
pass
try:
if command_args.objective_field:
objective = command_args.objective_field
try:
command_args.objective_field = int(objective)
except ValueError:
if not command_args.train_header:
sys.exit("The %s has been set as objective field but"
" the file has not been marked as containing"
" headers.\nPlease set the --train-header flag if"
" the file has headers or use a column number"
" to set the objective field." % objective)
except AttributeError:
pass
command_args.resume_ = resume
# Reads description if provided.
try:
if command_args.description:
description_arg = u.read_description(command_args.description)
command_args.description_ = description_arg
else:
command_args.description_ = DEFAULT_DESCRIPTION
except AttributeError:
pass
# Parses fields if provided.
try:
if command_args.field_attributes:
field_attributes_arg = (
u.read_field_attributes(command_args.field_attributes))
command_args.field_attributes_ = field_attributes_arg
else:
command_args.field_attributes_ = []
except AttributeError:
pass
try:
if command_args.test_field_attributes:
field_attributes_arg = (
u.read_field_attributes(command_args.test_field_attributes))
command_args.test_field_attributes_ = field_attributes_arg
else:
command_args.test_field_attributes_ = []
except AttributeError:
pass
# Parses types if provided.
try:
if command_args.types:
types_arg = u.read_types(command_args.types)
command_args.types_ = types_arg
else:
command_args.types_ = None
if command_args.test_types:
types_arg = u.read_types(command_args.test_types)
command_args.test_types_ = types_arg
else:
command_args.test_types_ = None
except AttributeError:
pass
# Parses dataset fields if provided.
try:
if command_args.dataset_fields:
dataset_fields_arg = [
field.strip() for field in command_args.dataset_fields.split(
command_args.args_separator)]
command_args.dataset_fields_ = dataset_fields_arg
else:
command_args.dataset_fields_ = []
except AttributeError:
pass
# Parses model input fields if provided.
try:
if command_args.model_fields:
model_fields_arg = [
field.strip() for field in command_args.model_fields.split(
command_args.args_separator)]
command_args.model_fields_ = model_fields_arg
else:
command_args.model_fields_ = []
except AttributeError:
pass
# Parses cluster input fields if provided.
try:
if command_args.cluster_fields:
cluster_fields_arg = [
field.strip() for field in command_args.cluster_fields.split(
command_args.args_separator)]
command_args.cluster_fields_ = cluster_fields_arg
else:
command_args.cluster_fields_ = []
except AttributeError:
pass
# Parses anomaly input fields if provided.
try:
if command_args.anomaly_fields:
anomaly_fields_arg = [
field.strip() for field in command_args.anomaly_fields.split(
command_args.args_separator)]
command_args.anomaly_fields_ = anomaly_fields_arg
else:
command_args.anomaly_fields_ = []
except AttributeError:
pass
model_ids = []
try:
# Parses model/ids if provided.
if command_args.models:
model_ids = u.read_resources(command_args.models)
command_args.model_ids_ = model_ids
except AttributeError:
pass
# Retrieve model/ids if provided.
try:
if command_args.model_tag:
model_ids = (model_ids +
u.list_ids(api.list_models,
"tags__in=%s" % command_args.model_tag))
command_args.model_ids_ = model_ids
except AttributeError:
pass
# Reads votes files in the provided directories.
try:
if command_args.votes_dirs:
dirs = [
directory.strip() for directory in
command_args.votes_dirs.split(
command_args.args_separator)]
votes_path = os.path.dirname(command_args.predictions)
votes_files = u.read_votes_files(dirs, votes_path)
command_args.votes_files_ = votes_files
else:
command_args.votes_files_ = []
except AttributeError:
pass
# Parses fields map if provided.
try:
if command_args.fields_map:
fields_map_arg = u.read_fields_map(command_args.fields_map)
command_args.fields_map_ = fields_map_arg
else:
command_args.fields_map_ = None
except AttributeError:
pass
cluster_ids = []
try:
# Parses cluster/ids if provided.
if command_args.clusters:
cluster_ids = u.read_resources(command_args.clusters)
command_args.cluster_ids_ = cluster_ids
except AttributeError:
pass
# Retrieve cluster/ids if provided.
try:
if command_args.cluster_tag:
cluster_ids = (cluster_ids +
u.list_ids(api.list_clusters,
"tags__in=%s" %
command_args.cluster_tag))
command_args.cluster_ids_ = cluster_ids
except AttributeError:
pass
# Parses cluster names to generate datasets if provided
try:
if command_args.cluster_datasets:
cluster_datasets_arg = [
dataset.strip() for dataset in
command_args.cluster_datasets.split(
command_args.args_separator)]
command_args.cluster_datasets_ = cluster_datasets_arg
else:
command_args.cluster_datasets_ = []
except AttributeError:
pass
# Parses cluster names to generate models if provided
try:
if command_args.cluster_models:
cluster_models_arg = [
model.strip() for model in
command_args.cluster_models.split(
command_args.args_separator)]
command_args.cluster_models_ = cluster_models_arg
else:
command_args.cluster_models_ = []
except AttributeError:
pass
anomaly_ids = []
try:
# Parses anomaly/ids if provided.
if command_args.anomalies:
anomaly_ids = u.read_resources(command_args.anomalies)
command_args.anomaly_ids_ = anomaly_ids
except AttributeError:
pass
# Retrieve anomaly/ids if provided.
try:
if command_args.anomaly_tag:
anomaly_ids = (anomaly_ids +
u.list_ids(api.list_anomalies,
"tags__in=%s" %
command_args.anomaly_tag))
command_args.anomaly_ids_ = anomaly_ids
except AttributeError:
pass
sample_ids = []
try:
# Parses sample/ids if provided.
if command_args.samples:
sample_ids = u.read_resources(command_args.samples)
command_args.sample_ids_ = sample_ids
except AttributeError:
pass
# Retrieve sample/ids if provided.
try:
if command_args.sample_tag:
sample_ids = (
sample_ids + u.list_ids(api.list_samples,
"tags__in=%s" %
command_args.sample_tag))
command_args.sample_ids_ = sample_ids
except AttributeError:
pass
# Parses sample row fields
try:
if command_args.row_fields:
row_fields_arg = [field.strip() for field in
command_args.row_fields.split(
command_args.args_separator)]
command_args.row_fields_ = row_fields_arg
else:
command_args.row_fields_ = []
except AttributeError:
pass
# Parses sample stat_fields
try:
if command_args.stat_fields:
stat_fields_arg = [field.strip() for field in
command_args.stat_fields.split(
command_args.args_separator)]
command_args.stat_fields_ = stat_fields_arg
else:
command_args.stat_fields_ = []
except AttributeError:
pass
return {"api": api, "args": command_args}
def transform_args(command_args, flags, api, user_defaults):
"""Transforms the formatted argument strings into structured arguments
"""
# Parses attributes in json format if provided
command_args.json_args = {}
for resource_type in RESOURCE_TYPES:
attributes_file = getattr(command_args,
"%s_attributes" % resource_type, None)
if attributes_file is not None:
command_args.json_args[resource_type] = u.read_json(
attributes_file)
else:
command_args.json_args[resource_type] = {}
# Parses dataset generators in json format if provided
if command_args.new_fields:
json_generators = u.read_json(command_args.new_fields)
command_args.dataset_json_generators = json_generators
else:
command_args.dataset_json_generators = {}
# Parses multi-dataset attributes in json such as field maps
if command_args.multi_dataset_attributes:
multi_dataset_json = u.read_json(command_args.multi_dataset_attributes)
command_args.multi_dataset_json = multi_dataset_json
else:
command_args.multi_dataset_json = {}
dataset_ids = None
command_args.dataset_ids = []
# Parses dataset/id if provided.
if command_args.datasets:
dataset_ids = u.read_datasets(command_args.datasets)
if len(dataset_ids) == 1:
command_args.dataset = dataset_ids[0]
command_args.dataset_ids = dataset_ids
# Reading test dataset ids is delayed till the very moment of use to ensure
# that the newly generated resources files can be used there too
command_args.test_dataset_ids = []
# Retrieve dataset/ids if provided.
if command_args.dataset_tag:
        dataset_ids = (dataset_ids or []) + u.list_ids(
            api.list_datasets,
            "tags__in=%s" % command_args.dataset_tag)
if len(dataset_ids) == 1:
command_args.dataset = dataset_ids[0]
command_args.dataset_ids = dataset_ids
# Reads a json filter if provided.
if command_args.json_filter:
json_filter = u.read_json_filter(command_args.json_filter)
command_args.json_filter = json_filter
# Reads a lisp filter if provided.
if command_args.lisp_filter:
lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
command_args.lisp_filter = lisp_filter
    # Adds default tags unless it is requested not to do so.
if command_args.no_tag:
command_args.tag.append('BigMLer')
command_args.tag.append('BigMLer_%s' % NOW)
# Checks combined votes method
try:
if (command_args.method and command_args.method != COMBINATION_LABEL
and not (command_args.method in COMBINATION_WEIGHTS.keys())):
command_args.method = 0
else:
combiner_methods = dict(
[[value, key] for key, value in COMBINER_MAP.items()])
combiner_methods[COMBINATION_LABEL] = COMBINATION
command_args.method = combiner_methods.get(command_args.method, 0)
except AttributeError:
pass
# Checks missing_strategy
try:
if (command_args.missing_strategy and
not (command_args.missing_strategy in
MISSING_STRATEGIES.keys())):
command_args.missing_strategy = 0
else:
command_args.missing_strategy = MISSING_STRATEGIES.get(
command_args.missing_strategy, 0)
except AttributeError:
pass
# Adds replacement=True if creating ensemble and nothing is specified
try:
if (command_args.number_of_models > 1 and
not command_args.replacement and
not '--no-replacement' in flags and
not 'replacement' in user_defaults and
not '--no-randomize' in flags and
not 'randomize' in user_defaults and
not '--sample-rate' in flags and
not 'sample_rate' in user_defaults):
command_args.replacement = True
except AttributeError:
pass
try:
# Old value for --prediction-info='full data' maps to 'full'
if command_args.prediction_info == 'full data':
print ("WARNING: 'full data' is a deprecated value. Use"
" 'full' instead")
command_args.prediction_info = FULL_FORMAT
except AttributeError:
pass
# Parses class, weight pairs for objective weight
try:
if command_args.objective_weights:
objective_weights = (
u.read_objective_weights(command_args.objective_weights))
command_args.objective_weights_json = objective_weights
except AttributeError:
pass
try:
command_args.multi_label_fields_list = []
if command_args.multi_label_fields is not None:
multi_label_fields = command_args.multi_label_fields.strip()
command_args.multi_label_fields_list = multi_label_fields.split(
command_args.args_separator)
except AttributeError:
pass
# Sets shared_flag if --shared or --unshared has been used
if '--shared' in flags or '--unshared' in flags:
command_args.shared_flag = True
else:
command_args.shared_flag = False
    # Set remote on if scoring a training dataset in bigmler anomaly
try:
if command_args.score:
command_args.remote = True
if not "--prediction-info" in flags:
command_args.prediction_info = FULL_FORMAT
except AttributeError:
pass
command_args.has_models_ = (
(hasattr(command_args, 'model') and command_args.model) or
(hasattr(command_args, 'models') and command_args.models) or
(hasattr(command_args, 'ensemble') and command_args.ensemble) or
(hasattr(command_args, 'ensembles') and command_args.ensembles) or
(hasattr(command_args, 'cluster') and command_args.cluster) or
(hasattr(command_args, 'clusters') and command_args.clusters) or
(hasattr(command_args, 'model_tag') and command_args.model_tag) or
(hasattr(command_args, 'anomaly') and command_args.anomaly) or
(hasattr(command_args, 'anomalies') and command_args.anomalies) or
(hasattr(command_args, 'ensemble_tag')
and command_args.ensemble_tag) or
(hasattr(command_args, 'cluster_tag') and command_args.cluster_tag) or
(hasattr(command_args, 'anomaly_tag') and command_args.anomaly_tag))
command_args.has_datasets_ = (
(hasattr(command_args, 'dataset') and command_args.dataset) or
(hasattr(command_args, 'datasets') and command_args.datasets) or
(hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))
command_args.has_test_datasets_ = (
(hasattr(command_args, 'test_dataset') and
command_args.test_dataset) or
(hasattr(command_args, 'test_datasets') and
command_args.test_datasets) or
(hasattr(command_args, 'test_dataset_tag') and
command_args.test_dataset_tag))
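
# --- Editor's illustrative sketch (not part of the original module) ---
# The combined-votes block above inverts COMBINER_MAP so that user-facing
# method names map back to their numeric codes, defaulting to 0 for unknown
# names. A standalone sketch of that normalization; the example map below is
# an assumption, not the real COMBINER_MAP.
def _normalize_method_sketch(method, combiner_map=None):
    combiner_map = combiner_map or {0: 'plurality', 1: 'confidence weighted'}
    name_to_code = dict(
        [[name, code] for code, name in combiner_map.items()])
    return name_to_code.get(method, 0)

# _normalize_method_sketch('confidence weighted') -> 1
# _normalize_method_sketch('unknown method') -> 0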
|
brokendata/bigmler
|
bigmler/processing/args.py
|
Python
|
apache-2.0
| 24,841
|
__author__ = 'cmantas'
from tools import *
def draw_single_move(frome, toe, minDf, **kwargs):
if frome == "weka":
table='arff2'+toe
elif toe == "weka":
table=frome+'2arff'
else:
table=frome+"2"+toe
tfidf_table= frome+"_tfidf"
query = join_query({'table':table, 'tfidf_table':tfidf_table, 'minDF':minDf})
plot_from_query(query, **kwargs)
def draw_many_moves(frome, toe, minDf_list=[10, 60, 110, 160]):
figure()
kwargs={}
for minDF in minDf_list:
kwargs['label']="minDF="+str(minDF)
kwargs['title'] ="Move "+ frome.title()+" to "+toe.title()
kwargs['ylabel']= "time (sec)"
kwargs['xlabel'] = 'documents/1000'
draw_single_move(frome, toe, minDF, **kwargs)
draw_many_moves("mahout","weka")
draw_many_moves("mahout","spark")
show()
exit()
def docs_vs_time(mover, tfidf, list, **kwargs):
join_multi(mover, tfidf,"documents/1000", "time/1000", list, **kwargs)
def size_vs_time(mover, tfidf, list, **kwargs):
join_multi(mover, tfidf,"input_size/1048576", "time/1000", list, **kwargs)
# docs_vs_time("mahout2spark", "mahout_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Mahout 2 Spark", xlabel="documents/1000", ylabel="time (sec)")
# size_vs_time("mahout2spark", "mahout_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Mahout 2 Spark", xlabel="size (MB)", ylabel="time (sec)")
#
# size_vs_time("mahout2arff", "mahout_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Mahout to arff", xlabel="size (MB)", ylabel="time (sec)")
# size_vs_time("spark2mahout", "spark_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Spark to Mahout", xlabel="size (MB)", ylabel="time (sec)")
size_vs_time("spark2arff", "spark_tfidf", cond_producer("minDF", [10, 60, 110, 160]), title="Move Spark to arff", xlabel="size (MB)", ylabel="time (sec)")
# # # multi_graph_query(query, cond_producer("minDF", [10, 60, 110, 160]), )
show()
# figure()
# rx, ry = query2lists("select input_size/1048576, time/1000 from mahout2spark order by input_size ")
# myplot(rx,ry)
# show()
exit()
|
project-asap/IReS-Platform
|
asap-tools/experiments/depricated/handler/draw_movers.py
|
Python
|
apache-2.0
| 2,135
|
from datetime import datetime
from weakref import WeakValueDictionary
from sqlalchemy import inspect
from sqlalchemy.orm import aliased
from sqlalchemy.orm.collections import InstrumentedList
from . import db
class MetaBaseModel(db.Model.__class__):
def __init__(cls, *args):
super().__init__(*args)
cls.aliases = WeakValueDictionary()
def __getitem__(cls, key):
try:
alias = cls.aliases[key]
except KeyError:
alias = aliased(cls)
cls.aliases[key] = alias
return alias
class BaseModel():
print_filter = ()
to_json_filter = ()
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, {
column: value
for column, value in self._to_dict().items()
if column not in self.print_filter
})
@property
def json(self):
json_obj = {}
for column, value in self._to_dict().items():
if column not in self.to_json_filter:
if isinstance(value, datetime):
json_obj[column] = value.strftime('%Y-%m-%d')
elif isinstance(value, InstrumentedList):
json_obj[column] = [v.json for v in value]
else:
json_obj[column] = value
return json_obj
def _to_dict(self):
return {
column.key: getattr(self, column.key)
for column in inspect(self.__class__).attrs
}
def save(self):
db.session.add(self)
db.session.commit()
return self
def delete(self):
db.session.delete(self)
db.session.commit()
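
# --- Editor's illustrative sketch (not part of the original module) ---
# A hypothetical model showing how these classes are meant to be combined:
# MetaBaseModel gives `User['alias']` aliased queries, BaseModel gives
# `.json`, `.save()` and `.delete()`. The class and column names here are
# assumptions.
#
#   class User(db.Model, BaseModel, metaclass=MetaBaseModel):
#       __tablename__ = 'users'
#       id = db.Column(db.Integer, primary_key=True)
#       name = db.Column(db.String(80))
#       password = db.Column(db.String(128))
#       to_json_filter = ('password',)  # hidden from .json output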
|
kymy86/gatlinpush
|
models/abc.py
|
Python
|
apache-2.0
| 1,671
|
from enum import Enum
class State(Enum):
PENDING = 1
STARTED = 2
STOPPED = 3
STOPPING = 4
COMPLETE = 5
ERROR = 6
class IllegalStateError(Exception):
pass
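
# --- Editor's illustrative sketch (not part of the original module) ---
# Example of how these two types are typically used together: a hypothetical
# guard that only allows stopping a replication that has actually started.
def _ensure_can_stop_sketch(current_state):
    if current_state not in (State.STARTED, State.STOPPING):
        raise IllegalStateError(
            "Cannot stop replication in state %s" % current_state.name)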
|
csm/cloudant-sync-python
|
cloudant/sync/replication/state.py
|
Python
|
apache-2.0
| 185
|
import pytest
from awx.main.access import (
LabelAccess,
)
@pytest.mark.django_db
def test_label_get_queryset_user(label, user):
u = user('user', False)
access = LabelAccess(u)
label.organization.member_role.members.add(u)
assert access.get_queryset().count() == 1
@pytest.mark.django_db
def test_label_get_queryset_su(label, user):
access = LabelAccess(user('user', True))
assert access.get_queryset().count() == 1
@pytest.mark.django_db
def test_label_read_access(label, user):
access = LabelAccess(user('user', False))
assert not access.can_read(label)
label.organization.member_role.members.add(user('user', False))
assert access.can_read(label)
@pytest.mark.django_db
def test_label_jt_read_access(label, user, job_template):
access = LabelAccess(user('user', False))
assert not access.can_read(label)
job_template.read_role.members.add(user('user', False))
job_template.labels.add(label)
assert access.can_read(label)
@pytest.mark.django_db
def test_label_access_superuser(label, user):
access = LabelAccess(user('admin', True))
assert access.can_read(label)
assert access.can_change(label, None)
assert access.can_delete(label)
@pytest.mark.django_db
def test_label_access_admin(organization_factory):
'''can_change because I am an admin of that org'''
no_members = organization_factory("no_members")
members = organization_factory("has_members",
users=['admin'],
labels=['test'])
label = members.labels.test
admin = members.users.admin
members.organization.admin_role.members.add(admin)
access = LabelAccess(admin)
assert not access.can_change(label, {'organization': no_members.organization.id})
assert access.can_read(label)
assert access.can_change(label, None)
assert access.can_change(label, {'organization': members.organization.id})
assert access.can_delete(label)
@pytest.mark.django_db
def test_label_access_user(label, user):
access = LabelAccess(user('user', False))
label.organization.member_role.members.add(user('user', False))
assert not access.can_add({'organization': None})
assert not access.can_change(label, None)
assert not access.can_delete(label)
assert access.can_read(label)
assert access.can_add({'organization': label.organization.id})
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/tests/functional/test_rbac_label.py
|
Python
|
apache-2.0
| 2,417
|
import datetime
import resource
import sys
import traceback
class StdErrLogger():
"""Writes basic utilization data to stderr"""
    def __init__(self, verbose=False):
self._verbose = verbose
def log(self, message, verbose=None):
"""Logs message to std err with optional stats on peak utilization"""
verbose = self._verbose if verbose is None else verbose
        if verbose:
usage = resource.getrusage(resource.RUSAGE_SELF)
memory_used = usage.ru_maxrss/1024
function_name = traceback.extract_stack()[-2:-1][0][2]
message = "usertime(s)={0:.0f}|systime(s)={1:.0f}|"\
"peak_memory_used(mb)={2}|{3}|{4}". \
format(usage.ru_utime, usage.ru_stime, memory_used,
function_name, message)
sys.stderr.write("{0}|{1}\n".format(datetime.datetime.today(), message))
|
lauringlab/variant_pipeline
|
scripts/logger.py
|
Python
|
apache-2.0
| 903
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestClient(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.translate.client import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor(self):
from google.cloud.translate._http import Connection
from google.cloud.translate.client import ENGLISH_ISO_639
http = object()
client = self._make_one(http=http)
self.assertIsInstance(client._connection, Connection)
self.assertIsNone(client._connection.credentials)
self.assertIs(client._connection.http, http)
self.assertEqual(client.target_language, ENGLISH_ISO_639)
def test_constructor_non_default(self):
from google.cloud.translate._http import Connection
http = object()
target = 'es'
client = self._make_one(target_language=target, http=http)
self.assertIsInstance(client._connection, Connection)
self.assertIsNone(client._connection.credentials)
self.assertIs(client._connection.http, http)
self.assertEqual(client.target_language, target)
def test_get_languages(self):
from google.cloud.translate.client import ENGLISH_ISO_639
client = self._make_one(http=object())
supported = [
{'language': 'en', 'name': 'English'},
{'language': 'af', 'name': 'Afrikaans'},
{'language': 'am', 'name': 'Amharic'},
]
data = {
'data': {
'languages': supported,
},
}
conn = client._connection = _Connection(data)
result = client.get_languages()
self.assertEqual(result, supported)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/languages')
self.assertEqual(req['query_params'],
{'target': ENGLISH_ISO_639})
def test_get_languages_no_target(self):
client = self._make_one(
target_language=None, http=object())
supported = [
{'language': 'en'},
{'language': 'af'},
{'language': 'am'},
]
data = {
'data': {
'languages': supported,
},
}
conn = client._connection = _Connection(data)
result = client.get_languages()
self.assertEqual(result, supported)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(len(req), 3)
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/languages')
self.assertEqual(req['query_params'], {})
def test_get_languages_explicit_target(self):
client = self._make_one(http=object())
target_language = 'en'
supported = [
{'language': 'en', 'name': 'Spanish'},
{'language': 'af', 'name': 'Afrikaans'},
{'language': 'am', 'name': 'Amharic'},
]
data = {
'data': {
'languages': supported,
},
}
conn = client._connection = _Connection(data)
result = client.get_languages(target_language)
self.assertEqual(result, supported)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/languages')
self.assertEqual(req['query_params'],
{'target': target_language})
def test_detect_language_bad_result(self):
client = self._make_one(http=object())
value = 'takoy'
conn = client._connection = _Connection({})
with self.assertRaises(ValueError):
client.detect_language(value)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/detect')
query_params = [
('q', value.encode('utf-8')),
]
self.assertEqual(req['query_params'], query_params)
def test_detect_language_single_value(self):
client = self._make_one(http=object())
value = 'takoy'
detection = {
'confidence': 1.0,
'input': value,
'language': 'ru',
'isReliable': False,
}
data = {
'data': {
'detections': [[detection]],
},
}
conn = client._connection = _Connection(data)
result = client.detect_language(value)
self.assertEqual(result, detection)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/detect')
query_params = [
('q', value.encode('utf-8')),
]
self.assertEqual(req['query_params'], query_params)
def test_detect_language_multiple_values(self):
client = self._make_one(http=object())
value1 = u'fa\xe7ade' # facade (with a cedilla)
detection1 = {
'confidence': 0.6166008,
'input': value1,
'isReliable': False,
'language': 'en',
}
value2 = 's\'il vous plait'
detection2 = {
'confidence': 0.29728225,
'input': value2,
'isReliable': False,
'language': 'fr',
}
data = {
'data': {
'detections': [
[detection1],
[detection2],
],
},
}
conn = client._connection = _Connection(data)
result = client.detect_language([value1, value2])
self.assertEqual(result, [detection1, detection2])
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/detect')
query_params = [
('q', value1.encode('utf-8')),
('q', value2.encode('utf-8')),
]
self.assertEqual(req['query_params'], query_params)
def test_detect_language_multiple_results(self):
client = self._make_one(http=object())
value = 'soy'
detection1 = {
'confidence': 0.81496066,
'input': value,
'language': 'es',
'isReliable': False,
}
detection2 = {
'confidence': 0.222,
'input': value,
'language': 'en',
'isReliable': False,
}
data = {
'data': {
'detections': [[detection1, detection2]],
},
}
client._connection = _Connection(data)
with self.assertRaises(ValueError):
client.detect_language(value)
def test_translate_bad_result(self):
client = self._make_one(http=object())
value = 'hvala ti'
conn = client._connection = _Connection({})
with self.assertRaises(ValueError):
client.translate(value)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '')
query_params = [
('target', 'en'),
('q', value.encode('utf-8')),
]
self.assertEqual(req['query_params'], query_params)
def test_translate_defaults(self):
client = self._make_one(http=object())
value = 'hvala ti'
translation = {
'detectedSourceLanguage': 'hr',
'translatedText': 'thank you',
'input': value,
}
data = {
'data': {
'translations': [translation],
},
}
conn = client._connection = _Connection(data)
result = client.translate(value)
self.assertEqual(result, translation)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '')
query_params = [
('target', 'en'),
('q', value.encode('utf-8')),
]
self.assertEqual(req['query_params'], query_params)
def test_translate_multiple(self):
client = self._make_one(http=object())
value1 = 'hvala ti'
translation1 = {
'detectedSourceLanguage': 'hr',
'translatedText': 'thank you',
'input': value1,
}
value2 = 'Dankon'
translation2 = {
'detectedSourceLanguage': 'eo',
'translatedText': 'thank you',
'input': value2,
}
data = {
'data': {
'translations': [translation1, translation2],
},
}
conn = client._connection = _Connection(data)
result = client.translate([value1, value2])
self.assertEqual(result, [translation1, translation2])
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '')
query_params = [
('target', 'en'),
('q', value1.encode('utf-8')),
('q', value2.encode('utf-8')),
]
self.assertEqual(req['query_params'], query_params)
def test_translate_explicit(self):
client = self._make_one(http=object())
value = 'thank you'
target_language = 'eo'
source_language = 'en'
translation = {
'translatedText': 'Dankon',
'input': value,
}
data = {
'data': {
'translations': [translation],
},
}
conn = client._connection = _Connection(data)
cid = '123'
format_ = 'text'
model = 'nmt'
result = client.translate(value, target_language=target_language,
source_language=source_language,
format_=format_, customization_ids=cid,
model=model)
self.assertEqual(result, translation)
# Verify requested.
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '')
query_params = [
('target', target_language),
('q', value.encode('utf-8')),
('cid', cid),
('format', format_),
('source', source_language),
('model', model),
]
self.assertEqual(req['query_params'], query_params)
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
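# Editorial note: _Connection is a test double for the real Connection class; its
# api_request() records every call's keyword arguments in _requested and replays
# the canned responses in order, which is what lets the tests above assert on the
# method, path and query_params of each simulated API call.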
|
daspecster/google-cloud-python
|
translate/unit_tests/test_client.py
|
Python
|
apache-2.0
| 12,057
|
import json, os, sys
from collections import OrderedDict
from lxml.etree import fromstring, tostring
# Builds a temporary patch for 'pluralRanges' JSON from original
# supplemental 'pluralRanges.xml'. This data is currently missing
# from the JSON CLDR export.
ROOT = '//pluralRanges'
def read(path):
data = open(path, 'rb').read()
return fromstring(data)
def main():
tree = read(sys.argv[1])
ranges = OrderedDict()
for n in tree.xpath('//pluralRanges'):
locales = n.attrib.get('locales').split()
recs = []
for c in n.xpath('./pluralRange'):
rec = dict((k, v) for k, v in c.attrib.iteritems())
recs.append(rec)
for k in locales:
ranges[k] = recs
sort = OrderedDict()
for k in sorted(ranges.iterkeys()):
sort[k] = ranges[k]
res = dict(
supplemental = dict(
pluralRanges = sort
)
)
print json.dumps(res, indent=2)
if __name__ == '__main__':
main()
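# Editorial note: the script takes the path to the supplemental pluralRanges.xml
# as its only argument and writes the patched JSON to stdout. An assumed invocation
# (paths are illustrative) would be:
#   python2 make_pluralrange_fix.py common/supplemental/pluralRanges.xml > pluralRanges.json
# Note the Python 2 idioms (iteritems/iterkeys and the print statement).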
|
Squarespace/cldr
|
scripts/make_pluralrange_fix.py
|
Python
|
apache-2.0
| 1,005
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import extlib_cuda as extlib
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from collections import namedtuple
BetaDist = namedtuple('BetaDist', ['concentration1', 'concentration0'])
class BetaDistFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, entity_embed, re_embed, im_embed, dist_name):
assert entity_embed.is_contiguous()
assert re_embed.is_contiguous()
assert im_embed.is_contiguous()
assert entity_embed.shape[-1] == re_embed.shape[-1] * 2 == im_embed.shape[-1] * 2
        assert entity_embed.shape[0] % re_embed.shape[0] == 0 or re_embed.shape[0] % entity_embed.shape[0] == 0
assert entity_embed.shape[1] % re_embed.shape[1] == 0 or re_embed.shape[1] % entity_embed.shape[1] == 0
assert re_embed.shape == im_embed.shape
out_rows = max(entity_embed.shape[0], re_embed.shape[0])
out_cols = max(entity_embed.shape[1], re_embed.shape[1])
with torch.no_grad():
dst = entity_embed.new(out_rows, out_cols).contiguous()
ctx.dist_name = dist_name
ctx.save_for_backward(entity_embed.data, re_embed.data, im_embed.data)
extlib.beta_dist_forward(entity_embed, re_embed, im_embed, dst, dist_name)
return dst
@staticmethod
def backward(ctx, grad_out):
with torch.no_grad():
entity_embed, re_embed, im_embed = ctx.saved_tensors
grad_entity = grad_out.new(entity_embed.shape).zero_()
grad_re = grad_out.new(re_embed.shape).zero_()
grad_im = grad_out.new(im_embed.shape).zero_()
extlib.beta_dist_backward(grad_out, entity_embed, re_embed, im_embed, grad_entity, grad_re, grad_im, ctx.dist_name)
return grad_entity, grad_re, grad_im, None
def beta_dist(entity_embed, query_dist, dist_name):
re_embed, im_embed = query_dist.concentration1, query_dist.concentration0
if entity_embed.dim() != re_embed.dim():
assert re_embed.dim() == 4
assert entity_embed.dim() == 3
l_dist = []
for i in range(re_embed.shape[1]):
re = re_embed[:, i, :, :].contiguous()
im = im_embed[:, i, :, :].contiguous()
d = BetaDistFunc.apply(entity_embed, re, im, dist_name)
l_dist.append(d)
d = torch.stack(l_dist, dim=1)
return d
else:
assert entity_embed.dim() == 3 and re_embed.dim() == 3 and im_embed.dim() == 3
return BetaDistFunc.apply(entity_embed, re_embed, im_embed, dist_name)
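# Editorial note on shapes (derived from the asserts in BetaDistFunc.forward):
# entity_embed is (rows_e, cols_e, 2*D) holding concatenated alpha/beta parameters,
# while concentration1/concentration0 are each (rows_q, cols_q, D); the row and
# column counts must be broadcastable (one divides the other), and the CUDA kernel
# returns a distance matrix of shape (max(rows_e, rows_q), max(cols_e, cols_q)).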
def beta_kl(entity_embed, query_dist):
return beta_dist(entity_embed, query_dist, "kl")
def beta_l2(entity_embed, query_dist):
return beta_dist(entity_embed, query_dist, "l2")
def beta_fisher_approx(entity_embed, query_dist):
return beta_dist(entity_embed, query_dist, "fisher_approx")
def naive_beta_kl(entity_embedding, query_dist):
alpha_embedding, beta_embedding = torch.chunk(entity_embedding, 2, dim=-1)
entity_dist = BetaDist(alpha_embedding, beta_embedding)
kld = torch.distributions.kl._kl_beta_beta(entity_dist, query_dist)
return torch.norm(kld, p=1, dim=-1)
def naive_beta_l2(entity_embedding, query_dist):
alpha_embedding, beta_embedding = torch.chunk(entity_embedding, 2, dim=-1)
d1 = (alpha_embedding - query_dist.concentration1) ** 2
d2 = (beta_embedding - query_dist.concentration0) ** 2
d = torch.sum(d1 + d2, dim=-1) * 0.5
return d
def naive_beta_fisher_approx(entity_embedding, query_dist):
alpha_embedding, beta_embedding = torch.chunk(entity_embedding, 2, dim=-1)
d1 = (alpha_embedding - query_dist.concentration1)
d2 = (beta_embedding - query_dist.concentration0)
with torch.no_grad():
tri_alpha = alpha_embedding.polygamma(1)
tri_beta = beta_embedding.polygamma(1)
tri_sum = -(alpha_embedding + beta_embedding).polygamma(1)
t1 = (tri_alpha + tri_sum) * (d1 ** 2)
t2 = 2 * tri_sum * d1 * d2
t3 = (tri_beta + tri_sum) * (d2 ** 2)
return 0.5 * torch.sum(t1 + t2 + t3, dim=-1)
def test_beta(dist_name):
from smore.common.modules import Regularizer
reg = Regularizer(1, 0.05, 1e9)
entity = Parameter(reg(torch.randn(30, 1, 400)).data.cuda())
re = Parameter(reg(torch.randn(1, 20, 200)).data.cuda())
im = Parameter(reg(torch.randn(1, 20, 200)).data.cuda())
query_dist = BetaDist(re, im)
if dist_name == 'kl':
fast_d = beta_kl
slow_d = naive_beta_kl
elif dist_name == 'l2':
fast_d = beta_l2
slow_d = naive_beta_l2
elif dist_name == 'fisher_approx':
fast_d = beta_fisher_approx
slow_d = naive_beta_fisher_approx
else:
raise NotImplementedError
l2 = fast_d(entity, query_dist)
loss = torch.sum(l2 ** 2) * 3.14
print(loss.item())
loss.backward()
e2 = entity.grad.clone()
r2 = re.grad.clone()
i2 = im.grad.clone()
print('\n========\n')
entity.grad = re.grad = im.grad = None
l1 = slow_d(entity, query_dist)
loss = torch.sum(l1 ** 2) * 3.14
print(loss.item())
loss.backward()
e1 = entity.grad.clone()
r1 = re.grad.clone()
i1 = im.grad.clone()
print(torch.mean(torch.abs(e1 - e2)))
print(torch.mean(torch.abs(r1 - r2)))
print(torch.mean(torch.abs(i1 - i2)))
if __name__ == '__main__':
import numpy as np
import random
torch.manual_seed(1)
np.random.seed(1)
random.seed(1)
test_beta('fisher_approx')
|
google-research/smore
|
smore/common/torchext/dist_func/beta_dist.py
|
Python
|
apache-2.0
| 6,068
|
#!/usr/bin/env python
#encoding=utf-8
# Recognize the captcha image and return the string of digits it contains
import Image,ImageEnhance,ImageFilter
# Split the image into four pieces, build the pixel sequence of each digit, and save the sequences to data.txt
# This function is used to collect the reference sequences for the digits 1-9
def split_pic_save(image_name):
im = Image.open(image_name)
im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(2)
im = im.convert('1')
#im.show()
#all by pixel
    s = 4 #start position of first number
    w = 7 #width of each number
    h = 15 #end position from top
im_new = []
#split four numbers in the picture
for i in range(4):
im1 = im.crop((s+w*i,0,s+w*(i+1),15))
im_new.append(im1)
f = file("data.txt","a")
for k in range(4):
l = []
#im_new[k].show()
for i in range(15):
for j in range(7):
if (im_new[k].getpixel((j,i)) == 255):
l.append(0)
else:
l.append(1)
f.write("l=[")
n = 0
for i in l:
if (n%10==0):
f.write("\n")
f.write(str(i)+",")
n+=1
f.write("]\n")
# Convert the image to black and white and remove noise
def getverify(name):
    # Binarize
threshold = 140
table = []
for i in range(256):
if i < threshold:
table.append(0)
else:
table.append(1)
    # Open the image
    im = Image.open(name)
    # Convert to grayscale (luminance)
    imgry = im.convert('L')
    #imgry.save('g'+name) # save the intermediate image
    # Binarize
out = imgry.point(table,'1')
out.save(name)
# Slice the image and get the pixel sequences of the four digits
def split_pic(image_name):
    # Call getverify to preprocess the image
getverify(image_name)
im = Image.open(image_name)
im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(2)
im = im.convert('1')
#im.show()
#all by pixel
    s = 4 #start position of first number
    w = 7 #width of each number
    h = 15 #end position from top
im_new = []
#split four numbers in the picture
for i in range(4):
im1 = im.crop((s+w*i,0,s+w*(i+1),15))
im_new.append(im1)
code_data = []
for k in range(4):
l = []
for i in range(15):
for j in range(7):
if (im_new[k].getpixel((j,i)) == 255):
l.append(0)
else:
l.append(1)
code_data.append(l)
return code_data
def getcode(img):
refer_dic = {
1:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,1,1,1,
0,0,0,0,1,1,1,0,0,0,
0,0,1,1,0,0,0,0,0,1,
1,0,0,0,0,0,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,
1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
2:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,
1,1,0,0,0,0,0,1,1,0,
0,0,0,1,1,1,0,0,0,0,
1,0,0,0,0,1,1,0,0,0,
0,1,1,1,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
3:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,
1,1,0,0,0,0,1,1,1,0,
0,0,0,1,1,1,0,0,0,0,
1,1,1,0,0,0,0,0,1,1,
0,0,0,0,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
4:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,1,
1,1,0,0,0,1,1,1,1,0,
0,1,1,1,1,1,0,1,1,1,
1,1,1,1,0,1,0,1,1,1,
0,0,0,0,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
5:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,1,1,1,1,
1,0,0,1,1,1,0,0,0,0,
0,1,0,1,1,1,0,0,0,0,
0,1,1,0,0,0,0,0,1,1,
0,0,0,0,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
6:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,0,
0,0,1,1,1,1,0,0,0,1,
1,1,1,0,0,0,1,1,1,1,
1,1,1,1,1,1,0,0,1,1,
0,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
7:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,1,
1,1,0,0,0,0,1,1,0,0,
0,0,0,1,1,0,0,0,0,1,
1,0,0,0,0,0,1,1,0,0,
0,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
8:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,
1,1,0,1,1,1,1,1,1,0,
1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,0,0,1,1,
0,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,],
9:[
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,
1,1,0,1,1,0,0,1,1,0,
1,1,1,1,1,1,0,0,0,0,
1,1,1,0,0,0,0,1,1,1,
0,0,0,0,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,]
}
code_data = split_pic(img)
code = ""
    for x in range(len(code_data)):  # loop over the captcha digits
        for y in refer_dic:  # loop over the reference sequences
            n = 0
            for z in range(len(code_data[x])):  # compare the digit's 0/1 pixel sequence with the reference sequence for digit y
                if code_data[x][z] == refer_dic[y][z]:
                    n+=1
            # accept digit y when at least 95% of the pixels match its reference
            if n >= len(code_data[x])*0.95:
                code += str(y)
return code
|
lichenhui/guahao
|
guahao_beijing/code_identify.py
|
Python
|
apache-2.0
| 4,985
|
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
import platform
# import random
import threading
import pickle
import time
import datetime
import queue
import uuid
import zmq
import json
from mpi4py import MPI
from parsl.app.errors import RemoteExceptionWrapper
from parsl.version import VERSION as PARSL_VERSION
from parsl.serialize import unpack_apply_message, serialize
RESULT_TAG = 10
TASK_REQUEST_TAG = 11
LOOP_SLOWDOWN = 0.0 # in seconds
HEARTBEAT_CODE = (2 ** 32) - 1
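# Editorial note: after the initial JSON registration message, everything the
# manager sends to the interchange on the task channel is a 4-byte little-endian
# integer: normally the number of ready workers it is requesting tasks for, with
# this sentinel value (2**32 - 1) reserved to mean "heartbeat" rather than a
# worker count (see Manager.heartbeat and Manager.pull_tasks below).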
class Manager(object):
""" Orchestrates the flow of tasks and results to and from the workers
1. Queue up task requests from workers
    2. Make batched requests to the interchange for tasks
3. Receive and distribute tasks to workers
4. Act as a proxy to the Interchange for results.
"""
def __init__(self,
comm, rank,
task_q_url="tcp://127.0.0.1:50097",
result_q_url="tcp://127.0.0.1:50098",
max_queue_size=10,
heartbeat_threshold=120,
heartbeat_period=30,
uid=None):
"""
Parameters
----------
        task_q_url : str
            ZMQ url of the interchange task channel from which tasks are received
        result_q_url : str
            ZMQ url of the interchange result channel to which results are sent
heartbeat_threshold : int
Number of seconds since the last message from the interchange after which the worker
assumes that the interchange is lost and the manager shuts down. Default:120
heartbeat_period : int
Number of seconds after which a heartbeat message is sent to the interchange
"""
self.uid = uid
self.context = zmq.Context()
self.task_incoming = self.context.socket(zmq.DEALER)
self.task_incoming.setsockopt(zmq.IDENTITY, uid.encode('utf-8'))
# Linger is set to 0, so that the manager can exit even when there might be
# messages in the pipe
self.task_incoming.setsockopt(zmq.LINGER, 0)
self.task_incoming.connect(task_q_url)
self.result_outgoing = self.context.socket(zmq.DEALER)
self.result_outgoing.setsockopt(zmq.IDENTITY, uid.encode('utf-8'))
self.result_outgoing.setsockopt(zmq.LINGER, 0)
self.result_outgoing.connect(result_q_url)
logger.info("Manager connected")
self.max_queue_size = max_queue_size + comm.size
# Creating larger queues to avoid queues blocking
# These can be updated after queue limits are better understood
self.pending_task_queue = queue.Queue()
self.pending_result_queue = queue.Queue()
self.ready_worker_queue = queue.Queue()
self.tasks_per_round = 1
self.heartbeat_period = heartbeat_period
self.heartbeat_threshold = heartbeat_threshold
self.comm = comm
self.rank = rank
def create_reg_message(self):
""" Creates a registration message to identify the worker to the interchange
"""
msg = {'parsl_v': PARSL_VERSION,
'python_v': "{}.{}.{}".format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro),
'os': platform.system(),
'hostname': platform.node(),
'dir': os.getcwd(),
'prefetch_capacity': 0,
'worker_count': (self.comm.size - 1),
'max_capacity': (self.comm.size - 1) + 0, # (+prefetch)
'reg_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
b_msg = json.dumps(msg).encode('utf-8')
return b_msg
def heartbeat(self):
""" Send heartbeat to the incoming task queue
"""
heartbeat = (HEARTBEAT_CODE).to_bytes(4, "little")
r = self.task_incoming.send(heartbeat)
logger.debug("Return from heartbeat : {}".format(r))
def recv_result_from_workers(self):
""" Receives a results from the MPI worker pool and send it out via 0mq
Returns:
--------
result: task result from the workers
"""
info = MPI.Status()
result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)
logger.debug("Received result from workers: {}".format(result))
return result
def recv_task_request_from_workers(self):
""" Receives 1 task request from MPI comm
Returns:
--------
worker_rank: worker_rank id
"""
info = MPI.Status()
comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
worker_rank = info.Get_source()
logger.info("Received task request from worker:{}".format(worker_rank))
return worker_rank
def pull_tasks(self, kill_event):
""" Pulls tasks from the incoming tasks 0mq pipe onto the internal
pending task queue
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
logger.info("[TASK PULL THREAD] starting")
poller = zmq.Poller()
poller.register(self.task_incoming, zmq.POLLIN)
# Send a registration message
msg = self.create_reg_message()
logger.debug("Sending registration message: {}".format(msg))
self.task_incoming.send(msg)
last_beat = time.time()
last_interchange_contact = time.time()
task_recv_counter = 0
poll_timer = 1
while not kill_event.is_set():
time.sleep(LOOP_SLOWDOWN)
ready_worker_count = self.ready_worker_queue.qsize()
pending_task_count = self.pending_task_queue.qsize()
logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count,
pending_task_count))
if time.time() > last_beat + self.heartbeat_period:
self.heartbeat()
last_beat = time.time()
if pending_task_count < self.max_queue_size and ready_worker_count > 0:
logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count))
msg = ((ready_worker_count).to_bytes(4, "little"))
self.task_incoming.send(msg)
socks = dict(poller.poll(timeout=poll_timer))
if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:
_, pkl_msg = self.task_incoming.recv_multipart()
tasks = pickle.loads(pkl_msg)
last_interchange_contact = time.time()
if tasks == 'STOP':
logger.critical("[TASK_PULL_THREAD] Received stop request")
kill_event.set()
break
elif tasks == HEARTBEAT_CODE:
logger.debug("Got heartbeat from interchange")
else:
# Reset timer on receiving message
poll_timer = 1
task_recv_counter += len(tasks)
logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t['task_id'] for t in tasks],
task_recv_counter))
for task in tasks:
self.pending_task_queue.put(task)
else:
logger.debug("[TASK_PULL_THREAD] No incoming tasks")
# Limit poll duration to heartbeat_period
# heartbeat_period is in s vs poll_timer in ms
poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2)
# Only check if no messages were received.
if time.time() > last_interchange_contact + self.heartbeat_threshold:
logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
kill_event.set()
logger.critical("[TASK_PULL_THREAD] Exiting")
break
def push_results(self, kill_event):
""" Listens on the pending_result_queue and sends out results via 0mq
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
# We set this timeout so that the thread checks the kill_event and does not
# block forever on the internal result queue
timeout = 0.1
# timer = time.time()
logger.debug("[RESULT_PUSH_THREAD] Starting thread")
while not kill_event.is_set():
time.sleep(LOOP_SLOWDOWN)
try:
items = []
while not self.pending_result_queue.empty():
r = self.pending_result_queue.get(block=True)
items.append(r)
if items:
self.result_outgoing.send_multipart(items)
except queue.Empty:
logger.debug("[RESULT_PUSH_THREAD] No results to send in past {}seconds".format(timeout))
except Exception as e:
logger.exception("[RESULT_PUSH_THREAD] Got an exception : {}".format(e))
logger.critical("[RESULT_PUSH_THREAD] Exiting")
def start(self):
""" Start the Manager process.
The worker loops on this:
1. If the last message sent was older than heartbeat period we send a heartbeat
2.
TODO: Move task receiving to a thread
"""
self.comm.Barrier()
logger.debug("Manager synced with workers")
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(target=self.pull_tasks,
args=(self._kill_event,))
self._result_pusher_thread = threading.Thread(target=self.push_results,
args=(self._kill_event,))
self._task_puller_thread.start()
self._result_pusher_thread.start()
start = None
result_counter = 0
task_recv_counter = 0
task_sent_counter = 0
logger.info("Loop start")
while not self._kill_event.is_set():
time.sleep(LOOP_SLOWDOWN)
# In this block we attempt to probe MPI for a set amount of time,
# and if we have exhausted all available MPI events, we move on
# to the next block. The timer and counter trigger balance
# fairness and responsiveness.
timer = time.time() + 0.05
counter = min(10, comm.size)
while time.time() < timer:
info = MPI.Status()
if counter > 10:
logger.debug("Hit max mpi events per round")
break
if not self.comm.Iprobe(status=info):
logger.debug("Timer expired, processed {} mpi events".format(counter))
break
else:
tag = info.Get_tag()
logger.info("Message with tag {} received".format(tag))
counter += 1
if tag == RESULT_TAG:
result = self.recv_result_from_workers()
self.pending_result_queue.put(result)
result_counter += 1
elif tag == TASK_REQUEST_TAG:
worker_rank = self.recv_task_request_from_workers()
self.ready_worker_queue.put(worker_rank)
else:
logger.error("Unknown tag {} - ignoring this message and continuing".format(tag))
available_worker_cnt = self.ready_worker_queue.qsize()
available_task_cnt = self.pending_task_queue.qsize()
logger.debug("[MAIN] Ready workers: {} Ready tasks: {}".format(available_worker_cnt,
available_task_cnt))
this_round = min(available_worker_cnt, available_task_cnt)
for i in range(this_round):
worker_rank = self.ready_worker_queue.get()
task = self.pending_task_queue.get()
comm.send(task, dest=worker_rank, tag=worker_rank)
task_sent_counter += 1
logger.debug("Assigning worker:{} task:{}".format(worker_rank, task['task_id']))
if not start:
start = time.time()
logger.debug("Tasks recvd:{} Tasks dispatched:{} Results recvd:{}".format(
task_recv_counter, task_sent_counter, result_counter))
# print("[{}] Received: {}".format(self.identity, msg))
# time.sleep(random.randint(4,10)/10)
self._task_puller_thread.join()
self._result_pusher_thread.join()
self.task_incoming.close()
self.result_outgoing.close()
self.context.term()
delta = time.time() - start
logger.info("mpi_worker_pool ran for {} seconds".format(delta))
def execute_task(bufs):
"""Deserialize the buffer and execute the task.
Returns the serialized result or exception.
"""
user_ns = locals()
user_ns.update({'__builtins__': __builtins__})
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
fname = getattr(f, '__name__', 'f')
prefix = "parsl_"
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
user_ns.update({fname: f,
argname: args,
kwargname: kwargs,
resultname: resultname})
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
logger.debug("[RUNNER] Executing: {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught exception; will raise it: {}".format(e))
raise e
else:
logger.debug("[RUNNER] Result: {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname)
def worker(comm, rank):
logger.info("Worker started")
# Sync worker with master
comm.Barrier()
logger.debug("Synced")
task_request = b'TREQ'
while True:
comm.send(task_request, dest=0, tag=TASK_REQUEST_TAG)
# The worker will receive {'task_id':<tid>, 'buffer':<buf>}
req = comm.recv(source=0, tag=rank)
logger.debug("Got req: {}".format(req))
tid = req['task_id']
logger.debug("Got task: {}".format(tid))
try:
result = execute_task(req['buffer'])
except Exception as e:
result_package = {'task_id': tid, 'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
logger.debug("No result due to exception: {} with result package {}".format(e, result_package))
else:
result_package = {'task_id': tid, 'result': serialize(result)}
logger.debug("Result: {}".format(result))
pkl_package = pickle.dumps(result_package)
comm.send(pkl_package, dest=0, tag=RESULT_TAG)
def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Args:
- filename (string): Name of the file to write logs to
- name (string): Logger name
- level (logging.LEVEL): Set the logging level.
- format_string (string): Set the format string
Returns:
- None
"""
if format_string is None:
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d Rank:{0} [%(levelname)s] %(message)s".format(rank)
global logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
def set_stream_logger(name='parsl', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Args:
- name (string) : Set the logger name.
- level (logging.LEVEL) : Set to logging.DEBUG by default.
- format_string (sting) : Set to None by default.
Returns:
- None
"""
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] Thread:%(thread)d %(message)s"
# format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
global logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
parser.add_argument("-l", "--logdir", default="parsl_worker_logs",
help="Parsl worker log directory")
parser.add_argument("-u", "--uid", default=str(uuid.uuid4()).split('-')[-1],
help="Unique identifier string for Manager")
parser.add_argument("-t", "--task_url", required=True,
help="REQUIRED: ZMQ url for receiving tasks")
parser.add_argument("--hb_period", default=30,
help="Heartbeat period in seconds. Uses manager default unless set")
parser.add_argument("--hb_threshold", default=120,
help="Heartbeat threshold in seconds. Uses manager default unless set")
parser.add_argument("-r", "--result_url", required=True,
help="REQUIRED: ZMQ url for posting results")
args = parser.parse_args()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
print("Starting rank: {}".format(rank))
os.makedirs(args.logdir, exist_ok=True)
# set_stream_logger()
try:
if rank == 0:
start_file_logger('{}/manager.mpi_rank_{}.log'.format(args.logdir, rank),
rank,
level=logging.DEBUG if args.debug is True else logging.INFO)
logger.info("Python version: {}".format(sys.version))
manager = Manager(comm, rank,
task_q_url=args.task_url,
result_q_url=args.result_url,
uid=args.uid,
heartbeat_threshold=int(args.hb_threshold),
heartbeat_period=int(args.hb_period))
manager.start()
logger.debug("Finalizing MPI Comm")
comm.Abort()
else:
start_file_logger('{}/worker.mpi_rank_{}.log'.format(args.logdir, rank),
rank,
level=logging.DEBUG if args.debug is True else logging.INFO)
worker(comm, rank)
except Exception as e:
logger.critical("mpi_worker_pool exiting from an exception")
logger.exception("Caught error: {}".format(e))
raise
else:
logger.info("mpi_worker_pool exiting")
print("MPI_WORKER_POOL exiting.")
|
Parsl/parsl
|
parsl/executors/extreme_scale/mpi_worker_pool.py
|
Python
|
apache-2.0
| 19,559
|
from typing import Dict, Optional
import time
from collections import defaultdict, deque
import numpy as np
from ray import logger
from ray.util.annotations import PublicAPI
@PublicAPI
class Stopper:
"""Base class for implementing a Tune experiment stopper.
Allows users to implement experiment-level stopping via ``stop_all``. By
default, this class does not stop any trials. Subclasses need to
implement ``__call__`` and ``stop_all``.
.. code-block:: python
import time
from ray import tune
from ray.tune import Stopper
class TimeStopper(Stopper):
def __init__(self):
self._start = time.time()
self._deadline = 300
def __call__(self, trial_id, result):
return False
def stop_all(self):
                return time.time() - self._start > self._deadline
tune.run(Trainable, num_samples=200, stop=TimeStopper())
"""
def __call__(self, trial_id, result):
"""Returns true if the trial should be terminated given the result."""
raise NotImplementedError
def stop_all(self):
"""Returns true if the experiment should be terminated."""
raise NotImplementedError
@PublicAPI
class CombinedStopper(Stopper):
"""Combine several stoppers via 'OR'.
Args:
*stoppers (Stopper): Stoppers to be combined.
Example:
.. code-block:: python
from ray.tune.stopper import CombinedStopper, \
MaximumIterationStopper, TrialPlateauStopper
stopper = CombinedStopper(
MaximumIterationStopper(max_iter=20),
TrialPlateauStopper(metric="my_metric")
)
tune.run(train, stop=stopper)
"""
def __init__(self, *stoppers: Stopper):
self._stoppers = stoppers
def __call__(self, trial_id, result):
return any(s(trial_id, result) for s in self._stoppers)
def stop_all(self):
return any(s.stop_all() for s in self._stoppers)
@PublicAPI
class NoopStopper(Stopper):
def __call__(self, trial_id, result):
return False
def stop_all(self):
return False
@PublicAPI
class FunctionStopper(Stopper):
"""Provide a custom function to check if trial should be stopped.
The passed function will be called after each iteration. If it returns
True, the trial will be stopped.
Args:
        function (Callable[[str, Dict], bool]): Function that checks if a trial
should be stopped. Must accept the `trial_id` string and `result`
dictionary as arguments. Must return a boolean.
"""
def __init__(self, function):
self._fn = function
def __call__(self, trial_id, result):
return self._fn(trial_id, result)
def stop_all(self):
return False
@classmethod
def is_valid_function(cls, fn):
is_function = callable(fn) and not issubclass(type(fn), Stopper)
if is_function and hasattr(fn, "stop_all"):
raise ValueError(
"Stop object must be ray.tune.Stopper subclass to be detected "
"correctly."
)
return is_function
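# Editorial usage sketch (not part of the Ray source): stop any trial whose
# reported "mean_accuracy" exceeds 0.95 (the metric name is assumed):
#
#   stopper = FunctionStopper(
#       lambda trial_id, result: result["mean_accuracy"] > 0.95)
#   tune.run(trainable, stop=stopper)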
@PublicAPI
class MaximumIterationStopper(Stopper):
"""Stop trials after reaching a maximum number of iterations
Args:
max_iter (int): Number of iterations before stopping a trial.
"""
def __init__(self, max_iter: int):
self._max_iter = max_iter
self._iter = defaultdict(lambda: 0)
def __call__(self, trial_id: str, result: Dict):
self._iter[trial_id] += 1
return self._iter[trial_id] >= self._max_iter
def stop_all(self):
return False
@PublicAPI
class ExperimentPlateauStopper(Stopper):
"""Early stop the experiment when a metric plateaued across trials.
Stops the entire experiment when the metric has plateaued
for more than the given amount of iterations specified in
the patience parameter.
Args:
metric (str): The metric to be monitored.
std (float): The minimal standard deviation after which
the tuning process has to stop.
top (int): The number of best models to consider.
mode (str): The mode to select the top results.
Can either be "min" or "max".
patience (int): Number of epochs to wait for
a change in the top models.
Raises:
ValueError: If the mode parameter is not "min" nor "max".
ValueError: If the top parameter is not an integer
greater than 1.
ValueError: If the standard deviation parameter is not
a strictly positive float.
ValueError: If the patience parameter is not
a strictly positive integer.
"""
def __init__(self, metric, std=0.001, top=10, mode="min", patience=0):
if mode not in ("min", "max"):
raise ValueError("The mode parameter can only be" " either min or max.")
if not isinstance(top, int) or top <= 1:
raise ValueError(
"Top results to consider must be"
" a positive integer greater than one."
)
if not isinstance(patience, int) or patience < 0:
raise ValueError("Patience must be" " a strictly positive integer.")
if not isinstance(std, float) or std <= 0:
raise ValueError(
"The standard deviation must be" " a strictly positive float number."
)
self._mode = mode
self._metric = metric
self._patience = patience
self._iterations = 0
self._std = std
self._top = top
self._top_values = []
def __call__(self, trial_id, result):
"""Return a boolean representing if the tuning has to stop."""
self._top_values.append(result[self._metric])
if self._mode == "min":
self._top_values = sorted(self._top_values)[: self._top]
else:
self._top_values = sorted(self._top_values)[-self._top :]
# If the current iteration has to stop
if self.has_plateaued():
# we increment the total counter of iterations
self._iterations += 1
else:
# otherwise we reset the counter
self._iterations = 0
# and then call the method that re-executes
# the checks, including the iterations.
return self.stop_all()
def has_plateaued(self):
return (
len(self._top_values) == self._top and np.std(self._top_values) <= self._std
)
def stop_all(self):
"""Return whether to stop and prevent trials from starting."""
return self.has_plateaued() and self._iterations >= self._patience
@PublicAPI
class TrialPlateauStopper(Stopper):
"""Early stop single trials when they reached a plateau.
When the standard deviation of the `metric` result of a trial is
below a threshold `std`, the trial plateaued and will be stopped
early.
Args:
metric (str): Metric to check for convergence.
std (float): Maximum metric standard deviation to decide if a
trial plateaued. Defaults to 0.01.
num_results (int): Number of results to consider for stdev
calculation.
grace_period (int): Minimum number of timesteps before a trial
can be early stopped
metric_threshold (Optional[float]):
Minimum or maximum value the result has to exceed before it can
be stopped early.
mode (Optional[str]): If a `metric_threshold` argument has been
passed, this must be one of [min, max]. Specifies if we optimize
for a large metric (max) or a small metric (min). If max, the
`metric_threshold` has to be exceeded, if min the value has to
be lower than `metric_threshold` in order to early stop.
"""
def __init__(
self,
metric: str,
std: float = 0.01,
num_results: int = 4,
grace_period: int = 4,
metric_threshold: Optional[float] = None,
mode: Optional[str] = None,
):
self._metric = metric
self._mode = mode
self._std = std
self._num_results = num_results
self._grace_period = grace_period
self._metric_threshold = metric_threshold
if self._metric_threshold:
if mode not in ["min", "max"]:
raise ValueError(
f"When specifying a `metric_threshold`, the `mode` "
f"argument has to be one of [min, max]. "
f"Got: {mode}"
)
self._iter = defaultdict(lambda: 0)
self._trial_results = defaultdict(lambda: deque(maxlen=self._num_results))
def __call__(self, trial_id: str, result: Dict):
metric_result = result.get(self._metric)
self._trial_results[trial_id].append(metric_result)
self._iter[trial_id] += 1
# If still in grace period, do not stop yet
if self._iter[trial_id] < self._grace_period:
return False
# If not enough results yet, do not stop yet
if len(self._trial_results[trial_id]) < self._num_results:
return False
# If metric threshold value not reached, do not stop yet
if self._metric_threshold is not None:
if self._mode == "min" and metric_result > self._metric_threshold:
return False
elif self._mode == "max" and metric_result < self._metric_threshold:
return False
# Calculate stdev of last `num_results` results
try:
current_std = np.std(self._trial_results[trial_id])
except Exception:
current_std = float("inf")
# If stdev is lower than threshold, stop early.
return current_std < self._std
def stop_all(self):
return False
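# Editorial usage sketch (not part of the Ray source): stop a trial once its last
# 8 reported "loss" values (metric name assumed) have a standard deviation below
# 0.005, but never before 10 results have been seen:
#
#   stopper = TrialPlateauStopper(
#       metric="loss", std=0.005, num_results=8, grace_period=10)
#   tune.run(trainable, stop=stopper)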
@PublicAPI
class TimeoutStopper(Stopper):
"""Stops all trials after a certain timeout.
This stopper is automatically created when the `time_budget_s`
argument is passed to `tune.run()`.
Args:
timeout (int|float|datetime.timedelta): Either a number specifying
the timeout in seconds, or a `datetime.timedelta` object.
"""
def __init__(self, timeout):
from datetime import timedelta
if isinstance(timeout, timedelta):
self._timeout_seconds = timeout.total_seconds()
elif isinstance(timeout, (int, float)):
self._timeout_seconds = timeout
else:
raise ValueError(
"`timeout` parameter has to be either a number or a "
"`datetime.timedelta` object. Found: {}".format(type(timeout))
)
# To account for setup overhead, set the start time only after
# the first call to `stop_all()`.
self._start = None
def __call__(self, trial_id, result):
return False
def stop_all(self):
if not self._start:
self._start = time.time()
return False
now = time.time()
if now - self._start >= self._timeout_seconds:
logger.info(
f"Reached timeout of {self._timeout_seconds} seconds. "
f"Stopping all trials."
)
return True
return False
|
ray-project/ray
|
python/ray/tune/stopper.py
|
Python
|
apache-2.0
| 11,388
|
from test.integration.base import DBTIntegrationTest, use_profile
class TestSimpleSnapshotFiles(DBTIntegrationTest):
NUM_SNAPSHOT_MODELS = 1
@property
def schema(self):
return "simple_snapshot_004"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
"snapshot-paths": ['snapshots-check'],
"test-paths": ['test-check-snapshots-expected'],
"model-paths": [],
}
def test_snapshot_check_cols_cycle(self):
results = self.run_dbt(["snapshot", '--vars', 'version: 1'])
self.assertEqual(len(results), 1)
results = self.run_dbt(["snapshot", '--vars', 'version: 2'])
self.assertEqual(len(results), 1)
results = self.run_dbt(["snapshot", '--vars', 'version: 3'])
self.assertEqual(len(results), 1)
def assert_expected(self):
self.run_dbt(['test', '--select', 'test_type:singular', '--vars', 'version: 3'])
@use_profile('postgres')
def test__postgres__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
|
analyst-collective/dbt
|
test/integration/004_simple_snapshot_tests/test_snapshot_check_cols.py
|
Python
|
apache-2.0
| 1,187
|
def fullspeed():
if isRightHandActivated:
i01.setHandVelocity("right", -1, -1, -1, -1, -1, -1)
if isLeftHandActivated:
i01.setHandVelocity("left", -1, -1, -1, -1, -1, -1)
if isRightArmActivated:
i01.setArmVelocity("right", -1, -1, -1, -1)
if isLeftArmActivated:
i01.setArmVelocity("left", -1, -1, -1, -1)
if isHeadActivated:
i01.setHeadVelocity(-1, -1, -1)
if isTorsoActivated:
i01.setTorsoVelocity(-1, -1, -1)
if isEyeLidsActivated:
i01.setEyelidsVelocity(-1,-1)
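# Editorial note: in MyRobotLab a velocity of -1 is conventionally treated as
# "no speed limit", so this gesture restores unrestricted, full-speed movement for
# every section of the robot that is currently activated.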
|
MyRobotLab/pyrobotlab
|
home/kwatters/harry/gestures/fullspeed.py
|
Python
|
apache-2.0
| 539
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from abc import abstractmethod, ABCMeta
from pyspark import since, keyword_only
from pyspark.ml.wrapper import JavaParams
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasRawPredictionCol, \
HasFeaturesCol, HasWeightCol
from pyspark.ml.common import inherit_doc
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
__all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator',
'MulticlassClassificationEvaluator', 'MultilabelClassificationEvaluator',
'ClusteringEvaluator']
@inherit_doc
class Evaluator(Params):
"""
Base class for evaluators that compute metrics from predictions.
.. versionadded:: 1.4.0
"""
__metaclass__ = ABCMeta
@abstractmethod
def _evaluate(self, dataset):
"""
Evaluates the output.
:param dataset: a dataset that contains labels/observations and
predictions
:return: metric
"""
raise NotImplementedError()
@since("1.4.0")
def evaluate(self, dataset, params=None):
"""
Evaluates the output with optional parameters.
:param dataset: a dataset that contains labels/observations and
predictions
:param params: an optional param map that overrides embedded
params
:return: metric
"""
if params is None:
params = dict()
if isinstance(params, dict):
if params:
return self.copy(params)._evaluate(dataset)
else:
return self._evaluate(dataset)
else:
raise ValueError("Params must be a param map but got %s." % type(params))
@since("1.5.0")
def isLargerBetter(self):
"""
Indicates whether the metric returned by :py:meth:`evaluate` should be maximized
(True, default) or minimized (False).
A given evaluator may support multiple metrics which may be maximized or minimized.
"""
return True
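# Editorial usage sketch (not part of the PySpark source): metrics are normally
# computed through one of the concrete evaluators defined below, e.g. on a
# DataFrame `predictions` that has "prediction" and "label" columns (names assumed):
#
#   evaluator = RegressionEvaluator(predictionCol="prediction", labelCol="label")
#   rmse = evaluator.evaluate(predictions)
#   r2 = evaluator.evaluate(predictions, {evaluator.metricName: "r2"})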
@inherit_doc
class JavaEvaluator(JavaParams, Evaluator):
"""
Base class for :py:class:`Evaluator`s that wrap Java/Scala
implementations.
"""
__metaclass__ = ABCMeta
def _evaluate(self, dataset):
"""
Evaluates the output.
:param dataset: a dataset that contains labels/observations and predictions.
:return: evaluation metric
"""
self._transfer_params_to_java()
return self._java_obj.evaluate(dataset._jdf)
def isLargerBetter(self):
self._transfer_params_to_java()
return self._java_obj.isLargerBetter()
@inherit_doc
class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol, HasWeightCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for binary classification, which expects two input columns: rawPrediction and label.
The rawPrediction column can be of type double (binary 0/1 prediction, or probability of label
1) or of type vector (length-2 vector of raw predictions, scores, or label probabilities).
>>> from pyspark.ml.linalg import Vectors
>>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
... [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
>>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
...
>>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
>>> evaluator.evaluate(dataset)
0.70...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
0.83...
>>> bce_path = temp_path + "/bce"
>>> evaluator.save(bce_path)
>>> evaluator2 = BinaryClassificationEvaluator.load(bce_path)
>>> str(evaluator2.getRawPredictionCol())
'raw'
>>> scoreAndLabelsAndWeight = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1], x[2]),
... [(0.1, 0.0, 1.0), (0.1, 1.0, 0.9), (0.4, 0.0, 0.7), (0.6, 0.0, 0.9),
... (0.6, 1.0, 1.0), (0.6, 1.0, 0.3), (0.8, 1.0, 1.0)])
>>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["raw", "label", "weight"])
...
>>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw", weightCol="weight")
>>> evaluator.evaluate(dataset)
0.70...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
0.82...
.. versionadded:: 1.4.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation (areaUnderROC|areaUnderPR)",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC", weightCol=None):
"""
__init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
metricName="areaUnderROC", weightCol=None)
"""
super(BinaryClassificationEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid)
self._setDefault(metricName="areaUnderROC")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("1.4.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("1.4.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("1.4.0")
def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC", weightCol=None):
"""
setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
metricName="areaUnderROC", weightCol=None)
Sets params for binary classification evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol, HasWeightCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Regression, which expects input columns prediction, label
and an optional weight column.
>>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5),
... (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)]
>>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
...
>>> evaluator = RegressionEvaluator(predictionCol="raw")
>>> evaluator.evaluate(dataset)
2.842...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"})
0.993...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"})
2.649...
>>> re_path = temp_path + "/re"
>>> evaluator.save(re_path)
>>> evaluator2 = RegressionEvaluator.load(re_path)
>>> str(evaluator2.getPredictionCol())
'raw'
>>> scoreAndLabelsAndWeight = [(-28.98343821, -27.0, 1.0), (20.21491975, 21.5, 0.8),
... (-25.98418959, -22.0, 1.0), (30.69731842, 33.0, 0.6), (74.69283752, 71.0, 0.2)]
>>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["raw", "label", "weight"])
...
>>> evaluator = RegressionEvaluator(predictionCol="raw", weightCol="weight")
>>> evaluator.evaluate(dataset)
2.740...
.. versionadded:: 1.4.0
"""
metricName = Param(Params._dummy(), "metricName",
"""metric name in evaluation - one of:
rmse - root mean squared error (default)
mse - mean squared error
r2 - r^2 metric
mae - mean absolute error.""",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, predictionCol="prediction", labelCol="label",
metricName="rmse", weightCol=None):
"""
__init__(self, predictionCol="prediction", labelCol="label", \
metricName="rmse", weightCol=None)
"""
super(RegressionEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid)
self._setDefault(metricName="rmse")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("1.4.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("1.4.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("1.4.0")
def setParams(self, predictionCol="prediction", labelCol="label",
metricName="rmse", weightCol=None):
"""
setParams(self, predictionCol="prediction", labelCol="label", \
metricName="rmse", weightCol=None)
Sets params for regression evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol, HasWeightCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Multiclass Classification, which expects two input
columns: prediction and label.
>>> scoreAndLabels = [(0.0, 0.0), (0.0, 1.0), (0.0, 0.0),
... (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)]
>>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
...
>>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
>>> evaluator.evaluate(dataset)
0.66...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
0.66...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "truePositiveRateByLabel",
... evaluator.metricLabel: 1.0})
0.75...
>>> mce_path = temp_path + "/mce"
>>> evaluator.save(mce_path)
>>> evaluator2 = MulticlassClassificationEvaluator.load(mce_path)
>>> str(evaluator2.getPredictionCol())
'prediction'
>>> scoreAndLabelsAndWeight = [(0.0, 0.0, 1.0), (0.0, 1.0, 1.0), (0.0, 0.0, 1.0),
... (1.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
... (2.0, 2.0, 1.0), (2.0, 0.0, 1.0)]
>>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["prediction", "label", "weight"])
...
>>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction",
... weightCol="weight")
>>> evaluator.evaluate(dataset)
0.66...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
0.66...
.. versionadded:: 1.5.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation "
"(f1|accuracy|weightedPrecision|weightedRecall|weightedTruePositiveRate|"
"weightedFalsePositiveRate|weightedFMeasure|truePositiveRateByLabel|"
"falsePositiveRateByLabel|precisionByLabel|recallByLabel|fMeasureByLabel)",
typeConverter=TypeConverters.toString)
metricLabel = Param(Params._dummy(), "metricLabel",
"The class whose metric will be computed in truePositiveRateByLabel|"
"falsePositiveRateByLabel|precisionByLabel|recallByLabel|fMeasureByLabel."
" Must be >= 0. The default value is 0.",
typeConverter=TypeConverters.toFloat)
beta = Param(Params._dummy(), "beta",
"The beta value used in weightedFMeasure|fMeasureByLabel."
" Must be > 0. The default value is 1.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, predictionCol="prediction", labelCol="label",
metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0):
"""
__init__(self, predictionCol="prediction", labelCol="label", \
metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0)
"""
super(MulticlassClassificationEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid)
self._setDefault(metricName="f1", metricLabel=0.0, beta=1.0)
kwargs = self._input_kwargs
self._set(**kwargs)
@since("1.5.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("1.5.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@since("3.0.0")
def setMetricLabel(self, value):
"""
Sets the value of :py:attr:`metricLabel`.
"""
return self._set(metricLabel=value)
@since("3.0.0")
def getMetricLabel(self):
"""
Gets the value of metricLabel or its default value.
"""
return self.getOrDefault(self.metricLabel)
@since("3.0.0")
def setBeta(self, value):
"""
Sets the value of :py:attr:`beta`.
"""
return self._set(beta=value)
@since("3.0.0")
def getBeta(self):
"""
Gets the value of beta or its default value.
"""
return self.getOrDefault(self.beta)
@keyword_only
@since("1.5.0")
def setParams(self, predictionCol="prediction", labelCol="label",
metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0):
"""
setParams(self, predictionCol="prediction", labelCol="label", \
metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0)
Sets params for multiclass classification evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class MultilabelClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Multilabel Classification, which expects two input
columns: prediction and label.
>>> scoreAndLabels = [([0.0, 1.0], [0.0, 2.0]), ([0.0, 2.0], [0.0, 1.0]),
... ([], [0.0]), ([2.0], [2.0]), ([2.0, 0.0], [2.0, 0.0]),
... ([0.0, 1.0, 2.0], [0.0, 1.0]), ([1.0], [1.0, 2.0])]
>>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
...
>>> evaluator = MultilabelClassificationEvaluator(predictionCol="prediction")
>>> evaluator.evaluate(dataset)
0.63...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
0.54...
>>> mlce_path = temp_path + "/mlce"
>>> evaluator.save(mlce_path)
>>> evaluator2 = MultilabelClassificationEvaluator.load(mlce_path)
>>> str(evaluator2.getPredictionCol())
'prediction'
.. versionadded:: 3.0.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation "
"(subsetAccuracy|accuracy|hammingLoss|precision|recall|f1Measure|"
"precisionByLabel|recallByLabel|f1MeasureByLabel|microPrecision|"
"microRecall|microF1Measure)",
typeConverter=TypeConverters.toString)
metricLabel = Param(Params._dummy(), "metricLabel",
"The class whose metric will be computed in precisionByLabel|"
"recallByLabel|f1MeasureByLabel. "
"Must be >= 0. The default value is 0.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, predictionCol="prediction", labelCol="label",
metricName="f1Measure", metricLabel=0.0):
"""
__init__(self, predictionCol="prediction", labelCol="label", \
metricName="f1Measure", metricLabel=0.0)
"""
super(MultilabelClassificationEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.MultilabelClassificationEvaluator", self.uid)
self._setDefault(metricName="f1Measure", metricLabel=0.0)
kwargs = self._input_kwargs
self._set(**kwargs)
@since("3.0.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("3.0.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@since("3.0.0")
def setMetricLabel(self, value):
"""
Sets the value of :py:attr:`metricLabel`.
"""
return self._set(metricLabel=value)
@since("3.0.0")
def getMetricLabel(self):
"""
Gets the value of metricLabel or its default value.
"""
return self.getOrDefault(self.metricLabel)
@keyword_only
@since("3.0.0")
def setParams(self, predictionCol="prediction", labelCol="label",
metricName="f1Measure", metricLabel=0.0):
"""
setParams(self, predictionCol="prediction", labelCol="label", \
metricName="f1Measure", metricLabel=0.0)
Sets params for multilabel classification evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class ClusteringEvaluator(JavaEvaluator, HasPredictionCol, HasFeaturesCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Clustering results, which expects two input
columns: prediction and features. The metric computes the Silhouette
measure using the squared Euclidean distance.
The Silhouette is a measure for the validation of the consistency
within clusters. It ranges between 1 and -1, where a value close to
1 means that the points in a cluster are close to the other points
in the same cluster and far from the points of the other clusters.
>>> from pyspark.ml.linalg import Vectors
>>> featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]),
... [([0.0, 0.5], 0.0), ([0.5, 0.0], 0.0), ([10.0, 11.0], 1.0),
... ([10.5, 11.5], 1.0), ([1.0, 1.0], 0.0), ([8.0, 6.0], 1.0)])
>>> dataset = spark.createDataFrame(featureAndPredictions, ["features", "prediction"])
...
>>> evaluator = ClusteringEvaluator(predictionCol="prediction")
>>> evaluator.evaluate(dataset)
0.9079...
>>> ce_path = temp_path + "/ce"
>>> evaluator.save(ce_path)
>>> evaluator2 = ClusteringEvaluator.load(ce_path)
>>> str(evaluator2.getPredictionCol())
'prediction'
.. versionadded:: 2.3.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation (silhouette)",
typeConverter=TypeConverters.toString)
distanceMeasure = Param(Params._dummy(), "distanceMeasure", "The distance measure. " +
"Supported options: 'squaredEuclidean' and 'cosine'.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, predictionCol="prediction", featuresCol="features",
metricName="silhouette", distanceMeasure="squaredEuclidean"):
"""
__init__(self, predictionCol="prediction", featuresCol="features", \
metricName="silhouette", distanceMeasure="squaredEuclidean")
"""
super(ClusteringEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.ClusteringEvaluator", self.uid)
self._setDefault(metricName="silhouette", distanceMeasure="squaredEuclidean")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("2.3.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("2.3.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("2.3.0")
def setParams(self, predictionCol="prediction", featuresCol="features",
metricName="silhouette", distanceMeasure="squaredEuclidean"):
"""
setParams(self, predictionCol="prediction", featuresCol="features", \
metricName="silhouette", distanceMeasure="squaredEuclidean")
Sets params for clustering evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
if __name__ == "__main__":
import doctest
import tempfile
import pyspark.ml.evaluation
from pyspark.sql import SparkSession
globs = pyspark.ml.evaluation.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.evaluation tests")\
.getOrCreate()
globs['spark'] = spark
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
aosagie/spark
|
python/pyspark/ml/evaluation.py
|
Python
|
apache-2.0
| 23,233
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals, division, absolute_import
import locale
import os, sys, unittest, tempfile, shutil, subprocess, re, json, platform
import time
import random
import functools
import datetime
from contextlib import contextmanager
import dxpy
from dxpy.compat import str, basestring, USING_PYTHON2
_run_all_tests = 'DXTEST_FULL' in os.environ
TEST_AZURE = ((os.environ.get('DXTEST_AZURE', '').startswith('azure:') and os.environ['DXTEST_AZURE']) or
(os.environ.get('DXTEST_AZURE') and 'azure:westus'))
TEST_BENCHMARKS = 'DXTEST_BENCHMARKS' in os.environ ## Used to exclude benchmarks from normal runs
TEST_DX_LOGIN = 'DXTEST_LOGIN' in os.environ
TEST_ENV = _run_all_tests or 'DXTEST_ENV' in os.environ
TEST_DX_DOCKER = 'DXTEST_DOCKER' in os.environ
TEST_FUSE = _run_all_tests or 'DXTEST_FUSE' in os.environ
TEST_HTTP_PROXY = _run_all_tests or 'DXTEST_HTTP_PROXY' in os.environ
TEST_ISOLATED_ENV = _run_all_tests or 'DXTEST_ISOLATED_ENV' in os.environ
TEST_MULTIPLE_USERS = _run_all_tests or 'DXTEST_SECOND_USER' in os.environ
TEST_NO_RATE_LIMITS = _run_all_tests or 'DXTEST_NO_RATE_LIMITS' in os.environ
TEST_ONLY_MASTER = 'DX_RUN_NEXT_TESTS' in os.environ
TEST_RUN_JOBS = _run_all_tests or 'DXTEST_RUN_JOBS' in os.environ
TEST_TCSH = _run_all_tests or 'DXTEST_TCSH' in os.environ
TEST_WITH_AUTHSERVER = _run_all_tests or 'DXTEST_WITH_AUTHSERVER' in os.environ
TEST_WITH_SMOKETEST_APP = _run_all_tests or 'DXTEST_WITH_SMOKETEST_APP' in os.environ
def _transform_words_to_regexp(s):
return r"\s+".join(re.escape(word) for word in s.split())
def host_is_centos_5():
if USING_PYTHON2:
distro = platform.linux_distribution()
if distro[0] == 'CentOS' and distro[1].startswith('5.'):
return True
return False
class DXCalledProcessError(subprocess.CalledProcessError):
def __init__(self, returncode, cmd, output=None, stderr=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
self.stderr = stderr
def __str__(self):
return "Command '%s' returned non-zero exit status %d, stderr:\n%s" % (self.cmd, self.returncode, self.stderr)
def check_output(*popenargs, **kwargs):
"""
Adapted version of the builtin subprocess.check_output which sets a
"stderr" field on the resulting exception (in addition to "output")
if the subprocess fails. (If the command succeeds, the contents of
stderr are discarded.)
:param also_return_stderr: if True, return stderr along with the output of the command as such (output, stderr)
:type also_return_stderr: bool
Unlike subprocess.check_output, unconditionally decodes the contents of the subprocess stdout and stderr
using locale.getpreferredencoding().
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
return_stderr = False
if 'also_return_stderr' in kwargs:
if kwargs['also_return_stderr']:
return_stderr = True
del kwargs['also_return_stderr']
# Unplug stdin (if not already overridden) so that dx doesn't prompt
# user for input at the tty
process = subprocess.Popen(stdin=kwargs.get('stdin', subprocess.PIPE),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, *popenargs, **kwargs)
output, err = process.communicate()
retcode = process.poll()
output = output.decode(locale.getpreferredencoding())
err = err.decode(locale.getpreferredencoding())
if retcode:
print(err)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
exc = DXCalledProcessError(retcode, cmd, output=output, stderr=err)
raise exc
if return_stderr:
return (output, err)
else:
return output
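# Illustrative sketch (not part of the original module): how the check_output
# helper above is typically used with also_return_stderr. The command string is
# a placeholder; any shell command behaves the same way.
def _example_check_output_with_stderr():
    try:
        # also_return_stderr=True makes the helper return an (output, stderr)
        # tuple instead of stdout alone.
        output, stderr = check_output("dx --version", shell=True, also_return_stderr=True)
        return output, stderr
    except DXCalledProcessError as e:
        # Unlike subprocess.CalledProcessError, the captured stderr is preserved
        # on the exception object.
        print(e.stderr)
        raise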
@contextmanager
def chdir(dirname=None):
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def run(command, **kwargs):
print("$ %s" % (command,))
if platform.system() == 'Windows':
# Before running unicode command strings here via subprocess, avoid
# letting Python 2.7 on Windows default to encoding the string with
# the ascii codec - use the preferred encoding of the OS instead
# (which will likely be 'cp1252'):
command_encoded = command.encode(locale.getpreferredencoding())
output = check_output(command_encoded, shell=True, **kwargs)
else:
output = check_output(command, shell=True, **kwargs)
print(output)
return output
@contextmanager
def temporary_project(name='dx client tests temporary project', cleanup=True, reclaim_permissions=False, select=False,
region=None, **kwargs):
"""Creates a temporary project scoped to the context manager, and
yields a DXProject handler for the project.
:param cleanup:
if False, do not clean up the project when done (useful for
debugging so you can examine the state of the project)
:type cleanup: bool
:param reclaim_permissions:
if True, attempts a project-xxxx/join before trying to destroy
the project. May be needed if the test reduced its own
permissions in the project.
:type reclaim_permissions: bool
:param select:
if True, sets the environment variable DX_PROJECT_CONTEXT_ID
(and restores the previous value afterwards) so that subprocess
calls made within the block use the new project by default.
:type select: bool
:param region:
Region name to create a project in. If None the project is created
in the default region.
:type region: str
"""
input_params = {'name': name}
if region is not None:
input_params["region"] = region
temp_project = dxpy.DXProject(dxpy.api.project_new(input_params, **kwargs)['id'])
try:
if select:
with select_project(temp_project):
yield temp_project
else:
yield temp_project
finally:
if reclaim_permissions:
dxpy.DXHTTPRequest('/' + temp_project.get_id() + '/join', {'level': 'ADMINISTER'}, **kwargs)
if cleanup:
dxpy.api.project_destroy(temp_project.get_id(), {"terminateJobs": True}, **kwargs)
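# Illustrative sketch (not part of the original module): typical use of the
# temporary_project context manager defined above. The project name is a
# placeholder; cleanup defaults to True, so the project is destroyed on exit,
# and select=True also exports DX_PROJECT_CONTEXT_ID for subprocesses started
# inside the block.
def _example_temporary_project():
    with temporary_project(name="scratch project", select=True) as proj:
        # proj is a dxpy.DXProject handler for the newly created project.
        return proj.describe()["name"]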
@contextmanager
def select_project(project_or_project_id):
"""Selects a project by setting the DX_PROJECT_CONTEXT_ID in
dxpy.config (and therefore os.environ); this change is propagated
to subprocesses that are invoked with the default settings. The
original setting of DX_PROJECT_CONTEXT_ID is restored when the
block exits.
:param project_or_project_id:
Project or container to select. May be specified either as a
string containing the project ID, or a DXProject handler.
:type project_or_project_id: str or DXProject
"""
if isinstance(project_or_project_id, basestring) or project_or_project_id is None:
project_id = project_or_project_id
else:
project_id = project_or_project_id.get_id()
current_project_env_var = dxpy.config.get('DX_PROJECT_CONTEXT_ID', None)
if project_id is None:
del dxpy.config['DX_PROJECT_CONTEXT_ID']
else:
dxpy.config['DX_PROJECT_CONTEXT_ID'] = project_id
try:
yield None
finally:
if current_project_env_var is None:
del dxpy.config['DX_PROJECT_CONTEXT_ID']
else:
dxpy.config['DX_PROJECT_CONTEXT_ID'] = current_project_env_var
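# Illustrative sketch (not part of the original module): select_project can wrap
# a block so that dx subprocesses run against an existing project; the previous
# DX_PROJECT_CONTEXT_ID is restored when the block exits. The project ID is a
# placeholder.
def _example_select_project(project_id="project-xxxx"):
    with select_project(project_id):
        return run("dx ls")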
# Invoke "dx cd" without using bash (as 'run' would) so that the config
# gets attached to this Python process (instead of the bash process) and
# will be applied in later calls in the same test.
#
# Some tests can also use the select_project helper but that code sets
# the environment variables, and this writes the config to disk, and we
# should test both code paths.
def cd(directory):
print("$ dx cd %s" % (directory,))
output = check_output(['dx', 'cd', directory], shell=False)
print(output)
return output
# Wait for all jobs in analysis to be created (see PTFM-14462)
def analysis_describe_with_retry(analysis_id_or_handler):
if isinstance(analysis_id_or_handler, basestring):
handler = dxpy.get_handler(analysis_id_or_handler)
else:
handler = analysis_id_or_handler
# All the describe fields may not be available immediately. Wait
# until they have been populated.
for i in range(200): # Don't wait an unbounded amount of time
desc = handler.describe()
# Sufficient to look for any field, other than 'id', that is
# present in all job describe hashes
if all('executable' in stage['execution'] for stage in desc['stages']):
return desc
time.sleep(0.5)
raise IOError('Timed out while waiting for ' + handler.get_id() + ' to have all jobs populated')
def override_environment(**kwargs):
"""Returns a copy of the current environment, with variables overridden
as specified in the arguments. Each key represents a variable name
and each value must be a string (to set the specified key to that
value) or None (to unset the specified key).
"""
env = os.environ.copy()
for key in kwargs:
if kwargs[key] is None:
if key in env:
del env[key]
else:
env[key] = kwargs[key]
return env
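# Illustrative sketch (not part of the original module): override_environment
# returns a modified copy of os.environ that can be passed to run()/check_output
# via the env keyword; a value of None removes that variable from the copy.
# The variable values are placeholders.
def _example_override_environment():
    env = override_environment(DX_PROJECT_CONTEXT_ID=None, DX_USERNAME="second-user")
    return run("dx env", env=env)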
def as_second_user():
second = json.loads(os.environ['DXTEST_SECOND_USER'])
context = {"auth_token": second['auth'], "auth_token_type": "Bearer"}
override = {"DX_SECURITY_CONTEXT": json.dumps(context),
"DX_USERNAME": second['user']}
return override_environment(**override)
def generate_unique_username_email():
r = random.randint(0, 255)
username = "asset_" + str(int(time.time())) + "_" + str(r)
email = username + "@example.com"
return username, email
# Note: clobbers the local environment! All tests that use this should
# be marked as such with TEST_ENV
@contextmanager
def without_project_context():
"""Within the scope of the block, the project context and workspace
configuration variables (and possibly other variables) are unset.
"""
prev_workspace_id = os.environ.get('DX_WORKSPACE_ID', None)
prev_proj_context_id = os.environ.get('DX_PROJECT_CONTEXT_ID', None)
if prev_workspace_id is not None:
del os.environ['DX_WORKSPACE_ID']
if prev_proj_context_id is not None:
del os.environ['DX_PROJECT_CONTEXT_ID']
subprocess.check_call("dx clearenv", shell=True)
try:
yield
finally:
if prev_workspace_id:
os.environ['DX_WORKSPACE_ID'] = prev_workspace_id
if prev_proj_context_id:
os.environ['DX_PROJECT_CONTEXT_ID'] = prev_proj_context_id
# Note: clobbers the local environment! All tests that use this should
# be marked as such with TEST_ENV
@contextmanager
def without_auth():
"""Within the scope of the block, the auth configuration variable (and
possibly other variables) are unset.
"""
prev_security_context = os.environ.get('DX_SECURITY_CONTEXT', None)
if prev_security_context is not None:
del os.environ['DX_SECURITY_CONTEXT']
subprocess.check_call("dx clearenv", shell=True)
try:
yield
finally:
if prev_security_context:
os.environ['DX_SECURITY_CONTEXT'] = prev_security_context
class DXTestCaseCompat(unittest.TestCase):
# method removed in python3
def assertItemsEqual(self, a, b):
self.assertEqual(sorted(a), sorted(b))
# methods with different names in python 2 and 3
# For example:
# v2 assertRaisesRegexp
# v3 assertRaisesRegex
if USING_PYTHON2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
assertRegex = unittest.TestCase.assertRegexpMatches
assertNotRegex = unittest.TestCase.assertNotRegexpMatches
else:
assertRaisesRegex = unittest.TestCase.assertRaisesRegex
assertRegex = unittest.TestCase.assertRegex
assertNotRegex = unittest.TestCase.assertNotRegex
class DXTestCase(DXTestCaseCompat):
def setUp(self):
proj_name = u"dxclient_test_pröject"
self.project = dxpy.api.project_new({"name": proj_name})['id']
dxpy.config["DX_PROJECT_CONTEXT_ID"] = self.project
cd(self.project + ":/")
dxpy.config.__init__(suppress_warning=True)
if 'DX_CLI_WD' in dxpy.config:
del dxpy.config['DX_CLI_WD']
def tearDown(self):
if "DX_USER_CONF_DIR" in os.environ:
os.environ.pop("DX_USER_CONF_DIR")
try:
dxpy.api.project_destroy(self.project, {"terminateJobs": True})
except Exception as e:
print("Failed to remove test project:", str(e))
if 'DX_PROJECT_CONTEXT_ID' in dxpy.config:
del dxpy.config['DX_PROJECT_CONTEXT_ID']
if 'DX_CLI_WD' in dxpy.config:
del dxpy.config['DX_CLI_WD']
# Be sure to use the check_output defined in this module if you wish
# to use stderr_regexp. Python's usual subprocess.check_output
# doesn't propagate stderr back to us.
@contextmanager
def assertSubprocessFailure(self, output_regexp=None, output_text=None, stderr_regexp=None, stderr_text=None, exit_code=3):
"""Asserts that the block being wrapped exits with CalledProcessError.
:param output_regexp: subprocess output must match this regexp
:type output_regexp: str
:param output_text: subprocess output must contain this string (allowing for whitespace changes)
:type output_text: str
:param stderr_regexp: subprocess stderr must match this regexp
:type stderr_regexp: str
:param stderr_text: subprocess stderr must contain this string (allowing for whitespace changes)
:type stderr_text: str
:param exit_code: assert subprocess exits with this exit code
:type exit_code: int
"""
# TODO: print out raw output_text or stderr_text if assertion
# fails for easier human parsing
if output_text is not None:
if output_regexp is not None:
raise ValueError("Cannot specify both output_regexp and output_text")
output_regexp = _transform_words_to_regexp(output_text)
if stderr_text is not None:
if stderr_regexp is not None:
raise ValueError("Cannot specify both stderr_regexp and stderr_text")
stderr_regexp = _transform_words_to_regexp(stderr_text)
try:
yield
except subprocess.CalledProcessError as e:
self.assertEqual(exit_code, e.returncode, "Expected command to return code %d but it returned code %d" % (exit_code, e.returncode))
if output_regexp:
print("stdout:")
print(e.output)
self.assertTrue(re.search(output_regexp, e.output), "Expected stdout to match '%s' but it didn't" % (output_regexp,))
if stderr_regexp:
if not hasattr(e, 'stderr'):
raise Exception('A stderr_regexp was supplied but the CalledProcessError did not return the contents of stderr')
if not re.search(stderr_regexp, e.stderr):
print("stderr:")
print(e.stderr)
self.fail("Expected stderr (%s) to match '%s' but it didn't".format(stderr_regexp, stderr_regexp))
return
self.assertFalse(True, "Expected command to fail with CalledProcessError but it succeeded")
def assertFileContentsEqualsString(self, path, s):
self.assertEqual(open(os.sep.join(path)).read(), s)
def _dictToPPJSON(self, d):
return json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))
def assertDictSubsetOf(self, subset_dict, containing_dict):
mm_items = []
mm_missing = []
for (key, value) in list(subset_dict.items()):
if key in containing_dict:
if value != containing_dict[key]:
mm_items.append(key)
else:
mm_missing.append(key)
err_items = len(mm_items) > 0
err_missing = len(mm_missing) > 0
if err_items or err_missing:
subset_json = self._dictToPPJSON(subset_dict)
containing_json = self._dictToPPJSON(containing_dict)
error_string = "Expected the following:\n"
error_string += "{}\n\nto be a subset of\n\n{}\n\n".format(subset_json,
containing_json)
if err_items:
m = ", ".join([str(x) for x in mm_items])
error_string += "Field value mismatch at keys: {}\n".format(m)
if err_missing:
m = ", ".join([str(x) for x in mm_missing])
error_string += "Keys missing from superset: {}\n".format(m)
self.assertFalse(True, error_string)
def write_app_directory_in_dir(self, temp_file_path, app_name, dxapp_str, code_filename, code_content):
# Note: if called twice with the same app_name, will overwrite
# the dxapp.json and code file (if specified) but will not
# remove any other files that happened to be present
try:
os.mkdir(os.path.join(temp_file_path, app_name))
except OSError as e:
if e.errno != 17: # directory already exists
raise e
if dxapp_str is not None:
with open(os.path.join(temp_file_path, app_name, 'dxapp.json'), 'wb') as manifest:
manifest.write(dxapp_str.encode())
if code_filename:
with open(os.path.join(temp_file_path, app_name, code_filename), 'w') as code_file:
code_file.write(code_content)
return os.path.join(temp_file_path, app_name)
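# Illustrative sketch (not part of the original test classes): how the
# assertSubprocessFailure context manager defined on DXTestCase is typically
# used. The command, expected stderr text and exit code are placeholders.
class _ExampleSubprocessFailureTest(DXTestCase):
    def test_bad_command_reports_stderr(self):
        # stderr is only available because run() goes through the check_output
        # defined in this module, which attaches stderr to the exception.
        with self.assertSubprocessFailure(stderr_regexp="not a recognized command",
                                          exit_code=2):
            run("dx nonexistent-subcommand")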
class DXTestCaseBuildWorkflows(DXTestCase):
"""
This class adds methods to ``DXTestCase`` related to (global) workflow
creation and destruction.
"""
base_workflow_spec = {
"name": "my_workflow",
"outputFolder": "/"
}
def setUp(self):
super(DXTestCaseBuildWorkflows, self).setUp()
self.temp_file_path = tempfile.mkdtemp()
self.test_applet_id = self.create_applet(self.project)
self.dxworkflow_spec = self.create_dxworkflow_spec()
def tearDown(self):
shutil.rmtree(self.temp_file_path)
super(DXTestCaseBuildWorkflows, self).tearDown()
def write_workflow_directory(self, workflow_name, dxworkflow_str,
readme_content="Workflow doc", build_basic=False):
# Note: if called twice with the same workflow_name, will overwrite
# the dxworkflow.json and code file (if specified) but will not
# remove any other files that happened to be present
try:
os.mkdir(os.path.join(self.temp_file_path, workflow_name))
except OSError as e:
if e.errno != 17: # directory already exists
raise e
if dxworkflow_str is not None:
with open(os.path.join(self.temp_file_path, workflow_name, 'dxworkflow.json'), 'wb') as manifest:
manifest.write(dxworkflow_str.encode())
elif build_basic:
with open(os.path.join(self.temp_file_path, workflow_name, 'dxworkflow.json'), 'wb') as manifest:
manifest.write(json.dumps(self.base_workflow_spec).encode())
with open(os.path.join(self.temp_file_path, workflow_name, 'Readme.md'), 'w') as readme_file:
readme_file.write(readme_content)
return os.path.join(self.temp_file_path, workflow_name)
def create_applet_spec(self, project_id):
return {"name": "my_first_applet",
"project": project_id,
"dxapi": "1.0.0",
"inputSpec": [{"name": "number", "class": "int"}],
"outputSpec": [{"name": "number", "class": "int"}],
"runSpec": {"interpreter": "bash",
"distribution": "Ubuntu",
"release": "14.04",
"code": "exit 0"}
}
def create_applet(self, project_id):
return dxpy.api.applet_new(self.create_applet_spec(project_id))['id']
def create_workflow_spec(self, project_id):
workflow_spec = {"name": "my_workflow",
"project": project_id,
"stages": [{"id": "stage_0",
"name": "stage_0_name",
"executable": self.test_applet_id,
"input": {"number": {"$dnanexus_link": {"workflowInputField": "foo"}}},
"folder": "/stage_0_output",
"executionPolicy": {"restartOn": {}, "onNonRestartableFailure": "failStage"},
"systemRequirements": {"main": {"instanceType": "mem1_ssd1_x2"}}},
{"id": "stage_1",
"executable": self.test_applet_id,
"input": {"number": {"$dnanexus_link": {"stage": "stage_0",
"outputField": "number"}}}}],
"workflow_inputs": [{"name": "foo", "class": "int"}],
"workflow_outputs": [{"name": "bar", "class": "int", "outputSource":
{"$dnanexus_link": {"stage": "stage_0", "outputField": "number"}}}]
}
return workflow_spec
def create_workflow(self, project_id, workflow_spec=None):
if not workflow_spec:
workflow_spec = self.create_workflow_spec(project_id)
dxworkflow = dxpy.DXWorkflow()
dxworkflow.new(**workflow_spec)
dxworkflow._close(dxworkflow.get_id())
return dxworkflow
def create_global_workflow_spec(self, project_id, name, version, workflow_spec=None):
dxworkflow = self.create_workflow(project_id, workflow_spec)
dxglobalworkflow_spec = {
"name": name,
"version": version,
"regionalOptions": {
"aws:us-east-1": {
"workflow": dxworkflow.get_id()
}
}
}
return dxglobalworkflow_spec
def create_global_workflow(self, project_id, name, version, workflow_spec=None):
spec = self.create_global_workflow_spec(project_id, name, version, workflow_spec)
dxglobalworkflow = dxpy.DXGlobalWorkflow()
dxglobalworkflow.new(**spec)
return dxglobalworkflow
def create_dxworkflow_spec(self):
return {"name": "my_workflow",
"title": "This is a beautiful workflow",
"version": "0.0.1",
"dxapi": "1.0.0",
"stages": [{"id": "stage_0",
"name": "stage_0_name",
"executable": self.test_applet_id,
"input": {"number": 777},
"folder": "/stage_0_output",
"executionPolicy": {"onNonRestartableFailure": "failStage"},
"systemRequirements": {"main": {"instanceType": "mem1_ssd1_x2"}}},
{"id": "stage_1",
"executable": self.test_applet_id,
"input": {"number": {"$dnanexus_link": {"stage": "stage_0",
"outputField": "number"}}}}]}
def write_app_directory(self, app_name, dxapp_str, code_filename=None, code_content="\n"):
return self.write_app_directory_in_dir(self.temp_file_path, app_name, dxapp_str, code_filename, code_content)
class DXTestCaseBuildApps(DXTestCase):
"""
This class adds methods to ``DXTestCase`` related to app creation,
app destruction, and extraction of app data as local files.
"""
base_applet_spec = {
"dxapi": "1.0.0",
"runSpec": {
"file": "code.py",
"interpreter": "python2.7",
"distribution": "Ubuntu",
"release": "14.04",
"version": '0'
},
"inputSpec": [],
"outputSpec": [],
"ignoreReuse": False
}
base_app_spec = dict(base_applet_spec, version="1.0.0")
def setUp(self):
super(DXTestCaseBuildApps, self).setUp()
self.temp_file_path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_file_path)
super(DXTestCaseBuildApps, self).tearDown()
def make_apps(self, num_apps, name_prefix, bill_to=None):
apps = []
app_spec = dict(self.base_app_spec)
for i in range(num_apps):
app_spec["name"] = name_prefix + "_" + str(i)
if bill_to is not None:
app_spec["billTo"] = bill_to
app_dir = self.write_app_directory("minimal_åpp",
json.dumps(app_spec),
"code.py")
app = json.loads(run("dx build --create-app --json " + app_dir))
apps.append(app)
return apps
def write_app_directory(self, app_name, dxapp_str, code_filename=None, code_content="\n"):
# Note: if called twice with the same app_name, will overwrite
# the dxapp.json and code file (if specified) but will not
# remove any other files that happened to be present
p = os.path.join(self.temp_file_path, app_name)
pb = p.encode("utf-8")
try:
os.mkdir(pb)
except OSError as e:
if e.errno != 17: # directory already exists
raise e
if dxapp_str is not None:
with open(os.path.join(pb, b'dxapp.json'), 'wb') as manifest:
manifest.write(dxapp_str.encode())
if code_filename:
with open(os.path.join(pb, code_filename.encode("utf-8")), 'w') as code_file:
code_file.write(code_content)
return p
class TemporaryFile:
''' A wrapper class around a NamedTemporaryFile. Intended for use inside a 'with' statement.
It returns a file-like object that can be opened by another process for writing, particularly
on Windows, where the OS does not allow multiple handles to a single file. The parameter
'close' determines if the file is returned closed or open.
'''
def __init__(self, mode='w+', bufsize=-1, suffix='', prefix='tmp', dir=None, delete=True, close=False):
if USING_PYTHON2:
self.temp_file = tempfile.NamedTemporaryFile(mode, bufsize, suffix, prefix, dir, delete=False)
else:
self.temp_file = tempfile.NamedTemporaryFile(mode, bufsize, "utf-8", None, suffix, prefix, dir, delete=False)
self.name = self.temp_file.name
self.delete = delete
if (close):
self.temp_file.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.delete:
os.unlink(self.name)
def write(self, buf):
return self.temp_file.write(buf)
def flush(self):
return self.temp_file.flush()
def close(self):
return self.temp_file.close()
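# Illustrative sketch (not part of the original module): TemporaryFile is meant
# to be used in a 'with' block; close=True hands back an already-closed file so
# that another process (e.g. a dx subprocess on Windows) can open it by name.
def _example_temporary_file():
    with TemporaryFile(suffix=".txt", close=True) as tmp:
        run("dx env > {0}".format(tmp.name))
        with open(tmp.name) as fh:
            return fh.read()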
def update_traceability_matrix(id_array):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
daystr = datetime.datetime.today().strftime('%Y%m%d')
with open("{0}.traceability.{1}.csv".format(os.path.splitext(os.path.basename(__file__))[0], daystr), "a") as f:
secstr = datetime.datetime.today().strftime('%Y%m%d%H%M%S')
try:
retval = func(*args, **kwargs)
for tid in id_array:
f.write("{0},{1},{2}\n".format(tid, "PASS", secstr))
return retval
except Exception as e:
for tid in id_array:
f.write("{0},{1},{2}\n".format(tid, "FAIL", secstr))
raise
return wrapper
return decorator
|
dnanexus/dx-toolkit
|
src/python/test/dxpy_testutil.py
|
Python
|
apache-2.0
| 29,488
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import json
import logging
import os
import re
import sys
import tempfile
import time
import traceback
import zipfile
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
import django.views.debug
import desktop.conf
import desktop.log.log_buffer
from desktop.api import massaged_tags_for_json, massaged_documents_for_json, _get_docs
from desktop.lib import django_mako
from desktop.lib.conf import GLOBAL_CONFIG, BoundConfig
from desktop.lib.django_util import JsonResponse, login_notrequired, render_json, render
from desktop.lib.i18n import smart_str
from desktop.lib.paths import get_desktop_root
from desktop.lib.thread_util import dump_traceback
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences, Settings, Document2
from desktop import appmanager
LOG = logging.getLogger(__name__)
@require_http_methods(['HEAD'])
def is_alive(request):
return HttpResponse('')
def home(request):
docs = _get_docs(request.user)
apps = appmanager.get_apps_dict(request.user)
return render('home.mako', request, {
'apps': apps,
'json_documents': json.dumps(massaged_documents_for_json(docs, request.user)),
'json_tags': json.dumps(massaged_tags_for_json(docs, request.user)),
'tours_and_tutorials': Settings.get_settings().tours_and_tutorials
})
def home2(request):
docs = Document2.objects.filter(owner=request.user)
apps = appmanager.get_apps_dict(request.user)
return render('home2.mako', request, {
'apps': apps,
'json_documents': json.dumps([doc.to_dict() for doc in docs]),
'tours_and_tutorials': Settings.get_settings().tours_and_tutorials
})
@access_log_level(logging.WARN)
def log_view(request):
"""
We have a log handler that retains the last X characters of log messages.
If it is attached to the root logger, this view will display that history,
otherwise it will report that it can't be found.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
return render('logs.mako', request, dict(log=[l for l in h.buf], query=request.GET.get("q", "")))
return render('logs.mako', request, dict(log=[_("No logs found!")]))
@access_log_level(logging.WARN)
def download_log_view(request):
"""
Zip up the log buffer and then return as a file attachment.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
try:
# We want to avoid doing a '\n'.join of the entire log in memory
# in case it is rather big. So we write it to a file line by line
# and pass that file to zipfile, which might follow a more efficient path.
tmp = tempfile.NamedTemporaryFile()
log_tmp = tempfile.NamedTemporaryFile("w+t")
for l in h.buf:
log_tmp.write(smart_str(l, errors='replace') + '\n')
# This is not just for show - w/out flush, we often get truncated logs
log_tmp.flush()
t = time.time()
zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
zip.close()
length = tmp.tell()
# if we don't seek to start of file, no bytes will be written
tmp.seek(0)
wrapper = FileWrapper(tmp)
response = HttpResponse(wrapper, content_type="application/zip")
response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
response['Content-Length'] = length
return response
except Exception, e:
LOG.exception("Couldn't construct zip file to write logs")
return log_view(request)
return render_to_response("logs.mako", dict(log=[_("No logs found.")]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
"""Get or set preferences."""
if key is None:
d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
return render_json(d)
else:
if "set" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
except UserPreferences.DoesNotExist:
x = UserPreferences(user=request.user, key=key)
x.value = request.REQUEST["set"]
x.save()
return render_json(True)
if "delete" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
x.delete()
return render_json(True)
except UserPreferences.DoesNotExist:
return render_json(False)
else:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
return render_json(x.value)
except UserPreferences.DoesNotExist:
return render_json(None)
def bootstrap(request):
"""Concatenates bootstrap.js files from all installed Hue apps."""
# Has some Nones for apps that don't have bootstraps.
all_bootstraps = [(app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name)]
# Iterator over the streams.
concatenated = ["\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None]
# HttpResponse can take an iterable as the first argument, which
# is what happens here.
return HttpResponse(concatenated, content_type='text/javascript')
_status_bar_views = []
def register_status_bar_view(view):
global _status_bar_views
_status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
"""
Concatenates multiple views together to build up the "status bar".
These views are registered using register_status_bar_view above.
"""
resp = ""
for view in _status_bar_views:
try:
r = view(request)
if r.status_code == 200:
resp += r.content
else:
LOG.warning("Failed to execute status_bar view %s" % (view,))
except:
LOG.exception("Failed to execute status_bar view %s" % (view,))
return HttpResponse(resp)
def dump_config(request):
# Note that this requires login (as do most apps).
show_private = False
conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
if request.GET.get("private"):
show_private = True
apps = sorted(appmanager.DESKTOP_MODULES, key=lambda app: app.name)
apps_names = [app.name for app in apps]
top_level = sorted(GLOBAL_CONFIG.get().values(), key=lambda obj: apps_names.index(obj.config.key))
return render("dump_config.mako", request, dict(
show_private=show_private,
top_level=top_level,
conf_dir=conf_dir,
apps=apps))
@access_log_level(logging.WARN)
def threads(request):
"""Dumps out server threads. Useful for debugging."""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
out = StringIO.StringIO()
dump_traceback(file=out)
return HttpResponse(out.getvalue(), content_type="text/plain")
@access_log_level(logging.WARN)
def memory(request):
"""Dumps out server threads. Useful for debugging."""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
if not hasattr(settings, 'MEMORY_PROFILER'):
return HttpResponse(_("You must enable the memory profiler via the memory_profiler config in the hue.ini."))
# type, from, to, index
command_order = {
'type': 0,
'from': 1,
'to': 2,
'index': 3
}
default_command = [None, None, None, None]
commands = []
for item in request.GET:
res = re.match(r'(?P<command>\w+)\.(?P<count>\d+)', item)
if res:
d = res.groupdict()
count = int(d['count'])
command = str(d['command'])
while len(commands) <= count:
commands.append(default_command[:])
commands[count][command_order.get(command)] = request.GET.get(item)
heap = settings.MEMORY_PROFILER.heap()
for command in commands:
if command[0] is not None:
heap = getattr(heap, command[0])
if command[1] is not None and command[2] is not None:
heap = heap[int(command[1]):int(command[2])]
if command[3] is not None:
heap = heap[int(command[3])]
return HttpResponse(str(heap), content_type="text/plain")
def jasmine(request):
return render('jasmine.mako', request, None)
@login_notrequired
def unsupported(request):
return render('unsupported.mako', request, None)
def index(request):
if request.user.is_superuser and request.COOKIES.get('hueLandingPage') != 'home':
return redirect(reverse('about:index'))
else:
return home(request)
def csrf_failure(request, reason=None):
"""Registered handler for CSRF."""
access_warn(request, reason)
return render("403_csrf.mako", request, dict(uri=request.build_absolute_uri()), status=403)
def serve_403_error(request, *args, **kwargs):
"""Registered handler for 403. We just return a simple error"""
access_warn(request, "403 access forbidden")
return render("403.mako", request, dict(uri=request.build_absolute_uri()), status=403)
def serve_404_error(request, *args, **kwargs):
"""Registered handler for 404. We just return a simple error"""
access_warn(request, "404 not found")
return render("404.mako", request, dict(uri=request.build_absolute_uri()), status=404)
def serve_500_error(request, *args, **kwargs):
"""Registered handler for 500. We use the debug view to make debugging easier."""
try:
exc_info = sys.exc_info()
if exc_info:
if desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]:
# If (None, None, None), default server error describing why this failed.
return django.views.debug.technical_500_response(request, *exc_info)
else:
# Could have an empty traceback
return render("500.mako", request, {'traceback': traceback.extract_tb(exc_info[2])})
else:
# exc_info could be empty
return render("500.mako", request, {})
finally:
# Fallback to default 500 response if ours fails
# Will end up here:
# - Middleware or authentication backends problems
# - Certain missing imports
# - Packaging and install issues
pass
_LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG
}
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
"""
Logs arguments to server's log. Returns an
empty string.
Parameters (specified via either GET or POST) are
"logname", "level" (one of "debug", "info", "warning",
"error", or "critical"), and "message".
"""
def get(param, default=None):
return request.REQUEST.get(param, default)
level = _LOG_LEVELS.get(get("level"), logging.INFO)
msg = "Untrusted log event from user %s: %s" % (
request.user,
get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
_LOG_FRONTEND_LOGGER.log(level, msg)
return HttpResponse("")
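# Illustrative sketch (not part of the original module): exercising the
# log_frontend_event view above with Django's test client. The URL path is an
# assumption made for illustration; the real route is defined in the project's
# urls.py.
def _example_log_frontend_event():
    from django.test import Client
    client = Client()
    # "level" must be one of the _LOG_LEVELS keys; anything else falls back to
    # INFO, and "message" is truncated to _MAX_LOG_FRONTEND_EVENT_LENGTH characters.
    return client.get('/desktop/log_frontend_event', {
        'logname': 'frontend',
        'level': 'error',
        'message': 'Example message from the browser',
    })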
def commonheader(title, section, user, padding="90px"):
"""
Returns the rendered common header
"""
current_app = None
other_apps = []
if user.is_authenticated():
apps = appmanager.get_apps(user)
apps_list = appmanager.get_apps_dict(user)
for app in apps:
if app.display_name not in [
'beeswax', 'impala', 'pig', 'jobsub', 'jobbrowser', 'metastore', 'hbase', 'sqoop', 'oozie', 'filebrowser',
'useradmin', 'search', 'help', 'about', 'zookeeper', 'proxy', 'rdbms', 'spark', 'indexer', 'security', 'notebook']:
other_apps.append(app)
if section == app.display_name:
current_app = app
else:
apps_list = []
return django_mako.render_to_string("common_header.mako", {
'current_app': current_app,
'apps': apps_list,
'other_apps': other_apps,
'title': title,
'section': section,
'padding': padding,
'user': user,
'is_demo': desktop.conf.DEMO_ENABLED.get(),
'is_ldap_setup': 'desktop.auth.backend.LdapBackend' in desktop.conf.AUTH.BACKEND.get()
})
def commonshare():
return django_mako.render_to_string("common_share.mako", {})
def commonimportexport(request):
return django_mako.render_to_string("common_import_export.mako", {'request': request})
def commonfooter(messages=None):
"""
Returns the rendered common footer
"""
if messages is None:
messages = {}
hue_settings = Settings.get_settings()
return django_mako.render_to_string("common_footer.mako", {
'messages': messages,
'version': settings.HUE_DESKTOP_VERSION,
'collect_usage': collect_usage(),
'tours_and_tutorials': hue_settings.tours_and_tutorials
})
def collect_usage():
return desktop.conf.COLLECT_USAGE.get() and Settings.get_settings().collect_usage
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'
#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
_CONFIG_ERROR_LIST = None
def _get_config_errors(request, cache=True):
"""Returns a list of (confvar, err_msg) tuples."""
global _CONFIG_ERROR_LIST
if not cache or _CONFIG_ERROR_LIST is None:
error_list = [ ]
for module in appmanager.DESKTOP_MODULES:
# Get the config_validator() function
try:
validator = getattr(module.conf, CONFIG_VALIDATOR)
except AttributeError:
continue
if not callable(validator):
LOG.warn("Auto config validation: %s.%s is not a function" %
(module.conf.__name__, CONFIG_VALIDATOR))
continue
try:
for confvar, error in validator(request.user):
error = {
'name': confvar if isinstance(confvar, str) else confvar.get_fully_qualifying_key(),
'message': error,
}
if isinstance(confvar, BoundConfig):
error['value'] = confvar.get()
error_list.append(error)
except Exception, ex:
LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
_CONFIG_ERROR_LIST = error_list
return _CONFIG_ERROR_LIST
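# Illustrative sketch (not part of the original module): the shape of the
# config_validator() hook that _get_config_errors() looks for on each app's
# conf module. It must accept the requesting user and return (config name or
# BoundConfig, error message) pairs; an empty list means the app's configuration
# is considered healthy. The setting checked here is hypothetical.
def _example_config_validator(user):
    errors = []
    if not os.getenv("EXAMPLE_SERVICE_URL"):  # hypothetical setting
        errors.append(("example_service_url", "EXAMPLE_SERVICE_URL is not configured."))
    return errors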
def check_config(request):
"""Check config and view for the list of errors"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
context = {
'conf_dir': os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))),
'error_list': _get_config_errors(request, cache=False),
}
if request.GET.get('format') == 'json':
return JsonResponse(context)
else:
return render('check_config.mako', request, context, force_template=True)
def check_config_ajax(request):
"""Alert administrators about configuration problems."""
if not request.user.is_superuser:
return HttpResponse('')
error_list = _get_config_errors(request)
if not error_list:
# Return an empty response, rather than using the mako template, for performance.
return HttpResponse('')
return render('config_alert_dock.mako',
request,
dict(error_list=error_list),
force_template=True)
# This is a global non-view for inline KO i18n
def _ko(str=""):
return _(str).replace("'", "\\'")
# This global Mako filtering option, use it with ${ yourvalue | n,antixss }
def antixss(value):
xss_regex = re.compile(r'<[^>]+>')
return xss_regex.sub('', value)
|
vmax-feihu/hue
|
desktop/core/src/desktop/views.py
|
Python
|
apache-2.0
| 16,942
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone External Authentication Plugins"""
import abc
from oslo_config import cfg
import six
from keystone import auth
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
CONF = cfg.CONF
@six.add_metaclass(abc.ABCMeta)
class Base(auth.AuthMethodHandler):
method = 'external'
def authenticate(self, context, auth_info, auth_context):
"""Use REMOTE_USER to look up the user in the identity backend.
auth_context is an in-out variable that will be updated with the
user_id from the actual user from the REMOTE_USER env variable.
"""
try:
REMOTE_USER = context['environment']['REMOTE_USER']
except KeyError:
msg = _('No authenticated user')
raise exception.Unauthorized(msg)
try:
user_ref = self._authenticate(REMOTE_USER, context)
auth_context['user_id'] = user_ref['id']
if ('kerberos' in CONF.token.bind and
(context['environment'].get('AUTH_TYPE', '').lower()
== 'negotiate')):
auth_context['bind']['kerberos'] = user_ref['name']
except Exception:
msg = _('Unable to lookup user %s') % (REMOTE_USER)
raise exception.Unauthorized(msg)
@abc.abstractmethod
def _authenticate(self, remote_user, context):
"""Look up the user in the identity backend.
Return user_ref
"""
pass
@dependency.requires('identity_api')
class DefaultDomain(Base):
def _authenticate(self, remote_user, context):
"""Use remote_user to look up the user in the identity backend."""
domain_id = CONF.identity.admin_domain_id
user_ref = self.identity_api.get_user_by_name(remote_user, domain_id)
return user_ref
@dependency.requires('identity_api', 'resource_api')
class Domain(Base):
def _authenticate(self, remote_user, context):
"""Use remote_user to look up the user in the identity backend.
The domain will be extracted from the REMOTE_DOMAIN environment
variable if present. If not, the default domain will be used.
"""
username = remote_user
try:
domain_name = context['environment']['REMOTE_DOMAIN']
except KeyError:
domain_id = CONF.identity.admin_domain_id
else:
domain_ref = self.resource_api.get_domain_by_name(domain_name)
domain_id = domain_ref['id']
user_ref = self.identity_api.get_user_by_name(username, domain_id)
return user_ref
@dependency.requires('assignment_api', 'identity_api')
class KerberosDomain(Domain):
"""Allows `kerberos` as a method."""
method = 'kerberos'
def _authenticate(self, remote_user, context):
auth_type = context['environment'].get('AUTH_TYPE')
if auth_type != 'Negotiate':
raise exception.Unauthorized(_("auth_type is not Negotiate"))
return super(KerberosDomain, self)._authenticate(remote_user, context)
|
darren-wang/ks3
|
keystone/auth/plugins/external.py
|
Python
|
apache-2.0
| 3,647
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Functionality to perform file loads into BigQuery for Batch and Streaming
pipelines.
This source is able to work around BigQuery load quotas and limitations. When
destinations are dynamic, or when data for a single job is too large, the data
will be split into multiple jobs.
NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
"""
# pytype: skip-file
from __future__ import absolute_import
import datetime
import hashlib
import logging
import random
import uuid
from future.utils import iteritems
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.io import filesystems as fs
from apache_beam.io.gcp import bigquery_tools
from apache_beam.options import value_provider as vp
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
_LOGGER = logging.getLogger(__name__)
ONE_TERABYTE = (1 << 40)
# The maximum file size for imports is 5TB. We keep our files under that.
_DEFAULT_MAX_FILE_SIZE = 4 * ONE_TERABYTE
_DEFAULT_MAX_WRITERS_PER_BUNDLE = 20
# The maximum total size of source files for a single load job is 15 TB.
_MAXIMUM_LOAD_SIZE = 15 * ONE_TERABYTE
# BigQuery only supports up to 10 thousand URIs for a single load job.
_MAXIMUM_SOURCE_URIS = 10 * 1000
# If triggering_frequency is supplied, we will trigger the file write after
# this many records are written.
_FILE_TRIGGERING_RECORD_COUNT = 500000
def _generate_load_job_name():
datetime_component = datetime.datetime.now().strftime("%Y_%m_%d_%H%M%S")
# TODO(pabloem): include job id / pipeline component?
return 'beam_load_%s_%s' % (datetime_component, random.randint(0, 100))
def file_prefix_generator(
with_validation=True, pipeline_gcs_location=None, temp_location=None):
def _generate_file_prefix(unused_elm):
# If a gcs location is provided to the pipeline, then we shall use that.
# Otherwise, we shall use the temp_location from pipeline options.
gcs_base = pipeline_gcs_location.get()
if not gcs_base:
gcs_base = temp_location
# This will fail at pipeline execution time, but will fail early, as this
# step doesn't have any dependencies (and thus will be one of the first
# stages to be run).
if with_validation and (not gcs_base or not gcs_base.startswith('gs://')):
raise ValueError(
'Invalid GCS location: %r.\n'
'Writing to BigQuery with FILE_LOADS method requires a '
'GCS location to be provided to write files to be '
'loaded into BigQuery. Please provide a GCS bucket, or '
'pass method="STREAMING_INSERTS" to WriteToBigQuery.' % gcs_base)
prefix_uuid = _bq_uuid()
return fs.FileSystems.join(gcs_base, 'bq_load', prefix_uuid)
return _generate_file_prefix
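# Illustrative sketch (not part of the original module): building a prefix
# function for temporary load files. The bucket name is a placeholder; in the
# real transform the ValueProvider normally comes from the pipeline's GCS
# temp_location options.
def _example_file_prefix():
    gcs_location = vp.StaticValueProvider(str, 'gs://example-bucket/tmp')
    prefix_fn = file_prefix_generator(
        with_validation=True, pipeline_gcs_location=gcs_location)
    # The argument is ignored; it only exists so the function can be used with
    # beam.Map over a single-element PCollection.
    return prefix_fn(None)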
def _make_new_file_writer(
file_prefix,
destination,
file_format,
schema=None,
schema_side_inputs=tuple()):
destination = bigquery_tools.get_hashable_destination(destination)
# Windows does not allow : on filenames. Replacing with underscore.
# Other disallowed characters are:
# https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
destination = destination.replace(':', '.')
directory = fs.FileSystems.join(file_prefix, destination)
if not fs.FileSystems.exists(directory):
fs.FileSystems.mkdirs(directory)
file_name = str(uuid.uuid4())
file_path = fs.FileSystems.join(file_prefix, destination, file_name)
if file_format == bigquery_tools.FileFormat.AVRO:
if callable(schema):
schema = schema(destination, *schema_side_inputs)
elif isinstance(schema, vp.ValueProvider):
schema = schema.get()
writer = bigquery_tools.AvroRowWriter(
fs.FileSystems.create(file_path, "application/avro"), schema)
elif file_format == bigquery_tools.FileFormat.JSON:
writer = bigquery_tools.JsonRowWriter(
fs.FileSystems.create(file_path, "application/text"))
else:
raise ValueError((
'Only AVRO and JSON are supported as intermediate formats for '
'BigQuery WriteRecordsToFile, got: {}.').format(file_format))
return file_path, writer
def _bq_uuid(seed=None):
if not seed:
return str(uuid.uuid4()).replace("-", "")
else:
return str(hashlib.md5(seed.encode('utf8')).hexdigest())
class _ShardDestinations(beam.DoFn):
"""Adds a shard number to the key of the KV element.
Experimental; no backwards compatibility guarantees."""
DEFAULT_SHARDING_FACTOR = 10
def __init__(self, sharding_factor=DEFAULT_SHARDING_FACTOR):
self.sharding_factor = sharding_factor
def start_bundle(self):
self._shard_count = random.randrange(self.sharding_factor)
def process(self, element):
destination = element[0]
row = element[1]
sharded_destination = (
destination, self._shard_count % self.sharding_factor)
self._shard_count += 1
yield (sharded_destination, row)
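# Illustrative sketch (not part of the original module): _ShardDestinations turns
# (destination, row) elements into ((destination, shard_number), row) elements so
# that a following GroupByKey spreads the rows of a hot destination across
# multiple keys. Table name and rows are placeholders.
def _example_shard_destinations(pipeline):
    return (
        pipeline
        | beam.Create([('project:dataset.table', {'name': 'a'}),
                       ('project:dataset.table', {'name': 'b'})])
        | beam.ParDo(_ShardDestinations(sharding_factor=4))
        | beam.GroupByKey())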
class WriteRecordsToFile(beam.DoFn):
"""Write input records to files before triggering a load job.
This transform keeps up to ``max_files_per_bundle`` files open to write to. It
receives (destination, record) tuples, and it writes the records to different
files for each destination.
If there are more than ``max_files_per_bundle`` destinations that we need to
write to, then those records are grouped by their destination, and later
written to files by ``WriteGroupedRecordsToFile``.
It outputs two PCollections.
"""
UNWRITTEN_RECORD_TAG = 'UnwrittenRecords'
WRITTEN_FILE_TAG = 'WrittenFiles'
def __init__(
self,
schema,
max_files_per_bundle=_DEFAULT_MAX_WRITERS_PER_BUNDLE,
max_file_size=_DEFAULT_MAX_FILE_SIZE,
file_format=None):
"""Initialize a :class:`WriteRecordsToFile`.
Args:
max_files_per_bundle (int): The maximum number of files that can be kept
open during execution of this step in a worker. This is to avoid
overwhelming the worker memory.
max_file_size (int): The maximum size in bytes for a file to be used in
an export job.
"""
self.schema = schema
self.max_files_per_bundle = max_files_per_bundle
self.max_file_size = max_file_size
self.file_format = file_format or bigquery_tools.FileFormat.JSON
def display_data(self):
return {
'max_files_per_bundle': self.max_files_per_bundle,
'max_file_size': str(self.max_file_size),
'file_format': self.file_format,
}
def start_bundle(self):
self._destination_to_file_writer = {}
def process(self, element, file_prefix, *schema_side_inputs):
"""Take a tuple with (destination, row) and write to file or spill out.
Destination may be a ``TableReference`` or a string, and row is a
Python dictionary for a row to be inserted to BigQuery."""
destination = bigquery_tools.get_hashable_destination(element[0])
row = element[1]
if destination not in self._destination_to_file_writer:
if len(self._destination_to_file_writer) < self.max_files_per_bundle:
self._destination_to_file_writer[destination] = _make_new_file_writer(
file_prefix,
destination,
self.file_format,
self.schema,
schema_side_inputs)
else:
yield pvalue.TaggedOutput(
WriteRecordsToFile.UNWRITTEN_RECORD_TAG, element)
return
(file_path, writer) = self._destination_to_file_writer[destination]
# TODO(pabloem): Is it possible for this to throw exception?
writer.write(row)
file_size = writer.tell()
if file_size > self.max_file_size:
writer.close()
self._destination_to_file_writer.pop(destination)
yield pvalue.TaggedOutput(
WriteRecordsToFile.WRITTEN_FILE_TAG,
(element[0], (file_path, file_size)))
def finish_bundle(self):
for destination, file_path_writer in \
iteritems(self._destination_to_file_writer):
(file_path, writer) = file_path_writer
file_size = writer.tell()
writer.close()
yield pvalue.TaggedOutput(
WriteRecordsToFile.WRITTEN_FILE_TAG,
GlobalWindows.windowed_value((destination, (file_path, file_size))))
self._destination_to_file_writer = {}
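# Illustrative sketch (hypothetical values): with max_files_per_bundle=2, rows
# for destinations A and B get dedicated writers and are emitted on the
# WRITTEN_FILE_TAG output as (destination, (file_path, file_size)) pairs, while
# rows for a third destination C are spilled unchanged to UNWRITTEN_RECORD_TAG
# and handled later by _ShardDestinations and WriteGroupedRecordsToFile.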
class WriteGroupedRecordsToFile(beam.DoFn):
"""Receives collection of dest-iterable(records), writes it to files.
This is different from ``WriteRecordsToFile`` because it receives records
grouped by destination. This means that it's not necessary to keep multiple
file descriptors open, because we know for sure when records for a single
destination have been written out.
Experimental; no backwards compatibility guarantees.
"""
def __init__(
self, schema, max_file_size=_DEFAULT_MAX_FILE_SIZE, file_format=None):
self.schema = schema
self.max_file_size = max_file_size
self.file_format = file_format or bigquery_tools.FileFormat.JSON
def process(self, element, file_prefix, *schema_side_inputs):
destination = element[0]
rows = element[1]
file_path, writer = None, None
for row in rows:
if writer is None:
(file_path, writer) = _make_new_file_writer(
file_prefix,
destination,
self.file_format,
self.schema,
schema_side_inputs)
writer.write(row)
file_size = writer.tell()
if file_size > self.max_file_size:
writer.close()
yield (destination, (file_path, file_size))
file_path, writer = None, None
if writer is not None:
writer.close()
yield (destination, (file_path, file_size))
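# Illustrative sketch (hypothetical sizes): given ('tableA', [r1, r2, r3]) and a
# max_file_size small enough that r1 and r2 fill one file, the loop above yields
# ('tableA', (file_1, size_1)) as soon as the limit is crossed, starts a new
# writer for r3, and yields ('tableA', (file_2, size_2)) after the loop ends.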
class TriggerCopyJobs(beam.DoFn):
"""Launches jobs to copy from temporary tables into the main target table.
When a job needs to write to multiple destination tables, or when a single
destination table needs to have multiple load jobs to write to it, files are
loaded into temporary tables, and those tables are later copied to the
destination tables.
This transform emits (destination, job_reference) pairs.
TODO(BEAM-7822): In file loads method of writing to BigQuery,
copying from temp_tables to destination_table is not atomic.
See: https://issues.apache.org/jira/browse/BEAM-7822
"""
def __init__(
self, create_disposition=None, write_disposition=None, test_client=None):
self.create_disposition = create_disposition
self.write_disposition = write_disposition
self.test_client = test_client
self._observed_tables = set()
def start_bundle(self):
self._observed_tables = set()
self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)
def process(self, element, job_name_prefix=None):
destination = element[0]
job_reference = element[1]
copy_to_reference = bigquery_tools.parse_table_reference(destination)
if copy_to_reference.projectId is None:
copy_to_reference.projectId = vp.RuntimeValueProvider.get_value(
'project', str, '')
copy_from_reference = bigquery_tools.parse_table_reference(destination)
copy_from_reference.tableId = job_reference.jobId
if copy_from_reference.projectId is None:
copy_from_reference.projectId = vp.RuntimeValueProvider.get_value(
'project', str, '')
copy_job_name = '%s_copy_%s_to_%s' % (
job_name_prefix,
_bq_uuid(
'%s:%s.%s' % (
copy_from_reference.projectId,
copy_from_reference.datasetId,
copy_from_reference.tableId)),
_bq_uuid(
'%s:%s.%s' % (
copy_to_reference.projectId,
copy_to_reference.datasetId,
copy_to_reference.tableId)))
_LOGGER.info(
"Triggering copy job from %s to %s",
copy_from_reference,
copy_to_reference)
if copy_to_reference.tableId not in self._observed_tables:
# When the write_disposition for a job is WRITE_TRUNCATE,
# multiple copy jobs to the same destination can stomp on each
# other, repeatedly truncating data that was already written to
# the BQ table.
# Thus, the first copy job runs with the user's write_disposition,
# but afterwards, all jobs must always WRITE_APPEND to the table.
# If they do not, subsequent copy jobs will clear out data appended
# by previous jobs.
write_disposition = self.write_disposition
wait_for_job = True
self._observed_tables.add(copy_to_reference.tableId)
else:
wait_for_job = False
write_disposition = 'WRITE_APPEND'
job_reference = self.bq_wrapper._insert_copy_job(
copy_to_reference.projectId,
copy_job_name,
copy_from_reference,
copy_to_reference,
create_disposition=self.create_disposition,
write_disposition=write_disposition)
if wait_for_job:
self.bq_wrapper.wait_for_bq_job(job_reference, sleep_duration_sec=10)
yield (destination, job_reference)
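# Illustrative sketch (hypothetical jobs): with write_disposition=WRITE_TRUNCATE
# and three temp tables copying into the same destination, only the first copy
# job keeps WRITE_TRUNCATE (and is waited on); the remaining copy jobs are
# issued with WRITE_APPEND so they cannot wipe out rows already copied.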
class TriggerLoadJobs(beam.DoFn):
"""Triggers the import jobs to BQ.
Experimental; no backwards compatibility guarantees.
"""
TEMP_TABLES = 'TemporaryTables'
def __init__(
self,
schema=None,
create_disposition=None,
write_disposition=None,
test_client=None,
temporary_tables=False,
additional_bq_parameters=None,
source_format=None):
self.schema = schema
self.test_client = test_client
self.temporary_tables = temporary_tables
self.additional_bq_parameters = additional_bq_parameters or {}
self.source_format = source_format
if self.temporary_tables:
# If we are loading into temporary tables, we rely on the default create
# and write dispositions, which mean that a new table will be created.
self.create_disposition = None
self.write_disposition = None
else:
self.create_disposition = create_disposition
self.write_disposition = write_disposition
def display_data(self):
result = {
'create_disposition': str(self.create_disposition),
'write_disposition': str(self.write_disposition)
}
result['schema'] = str(self.schema)
return result
def start_bundle(self):
self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)
def process(self, element, load_job_name_prefix, *schema_side_inputs):
# Each load job is assumed to have files respecting these constraints:
# 1. Total size of all files < 15 TB (Max size for load jobs)
# 2. Total no. of files in a single load job < 10,000
# This assumption means that there will always be a single load job
# triggered for each partition of files.
destination = element[0]
files = element[1]
if callable(self.schema):
schema = self.schema(destination, *schema_side_inputs)
elif isinstance(self.schema, vp.ValueProvider):
schema = self.schema.get()
else:
schema = self.schema
if callable(self.additional_bq_parameters):
additional_parameters = self.additional_bq_parameters(destination)
elif isinstance(self.additional_bq_parameters, vp.ValueProvider):
additional_parameters = self.additional_bq_parameters.get()
else:
additional_parameters = self.additional_bq_parameters
table_reference = bigquery_tools.parse_table_reference(destination)
if table_reference.projectId is None:
table_reference.projectId = vp.RuntimeValueProvider.get_value(
'project', str, '')
# Load jobs for a single destination are always triggered from the same
# worker. This means that we can generate a deterministic numbered job id
# and do not need to worry about name collisions.
destination_hash = _bq_uuid(
'%s:%s.%s' % (
table_reference.projectId,
table_reference.datasetId,
table_reference.tableId))
uid = _bq_uuid()
job_name = '%s_%s_%s' % (load_job_name_prefix, destination_hash, uid)
_LOGGER.debug(
'Load job has %s files. Job name is %s.', len(files), job_name)
create_disposition = self.create_disposition
if self.temporary_tables:
# If we are using temporary tables, then we must always create the
# temporary tables, so we replace the create_disposition.
create_disposition = 'CREATE_IF_NEEDED'
# For temporary tables, we create a new table with the name with JobId.
table_reference.tableId = job_name
yield pvalue.TaggedOutput(TriggerLoadJobs.TEMP_TABLES, table_reference)
_LOGGER.info(
'Triggering job %s to load data to BigQuery table %s. '
'Schema: %s. Additional parameters: %s',
job_name,
table_reference,
schema,
additional_parameters)
job_reference = self.bq_wrapper.perform_load_job(
table_reference,
files,
job_name,
schema=schema,
write_disposition=self.write_disposition,
create_disposition=create_disposition,
additional_load_parameters=additional_parameters,
source_format=self.source_format)
yield (destination, job_reference)
class PartitionFiles(beam.DoFn):
MULTIPLE_PARTITIONS_TAG = 'MULTIPLE_PARTITIONS'
SINGLE_PARTITION_TAG = 'SINGLE_PARTITION'
class Partition(object):
def __init__(self, max_size, max_files, files=None, size=0):
self.max_size = max_size
self.max_files = max_files
self.files = files if files is not None else []
self.size = size
def can_accept(self, file_size, no_of_files=1):
if (((self.size + file_size) <= self.max_size) and
((len(self.files) + no_of_files) <= self.max_files)):
return True
else:
return False
def add(self, file_path, file_size):
self.files.append(file_path)
self.size += file_size
def __init__(self, max_partition_size, max_files_per_partition):
self.max_partition_size = max_partition_size
self.max_files_per_partition = max_files_per_partition
def process(self, element):
destination = element[0]
files = element[1]
partitions = []
latest_partition = PartitionFiles.Partition(
self.max_partition_size, self.max_files_per_partition)
for file_path, file_size in files:
if latest_partition.can_accept(file_size):
latest_partition.add(file_path, file_size)
else:
partitions.append(latest_partition.files)
latest_partition = PartitionFiles.Partition(
self.max_partition_size, self.max_files_per_partition)
latest_partition.add(file_path, file_size)
partitions.append(latest_partition.files)
if len(partitions) > 1:
output_tag = PartitionFiles.MULTIPLE_PARTITIONS_TAG
else:
output_tag = PartitionFiles.SINGLE_PARTITION_TAG
for partition in partitions:
yield pvalue.TaggedOutput(output_tag, (destination, partition))
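# Illustrative sketch (hypothetical limits): with max_files_per_partition=2 and
# a generous max_partition_size, files [f1, f2, f3] for one destination are
# split into partitions [f1, f2] and [f3]; because there is more than one
# partition, both are emitted on MULTIPLE_PARTITIONS_TAG and will be loaded via
# temporary tables.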
class WaitForBQJobs(beam.DoFn):
"""Takes in a series of BQ job names as side input, and waits for all of them.
If any job fails, it will fail. If all jobs succeed, it will succeed.
Experimental; no backwards compatibility guarantees.
"""
def __init__(self, test_client=None):
self.test_client = test_client
def start_bundle(self):
self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)
def process(self, element, dest_ids_list):
job_references = [elm[1] for elm in dest_ids_list]
for ref in job_references:
# We must poll repeatedly until the job finishes or fails, thus setting
# max_retries to 0.
self.bq_wrapper.wait_for_bq_job(ref, sleep_duration_sec=10, max_retries=0)
return dest_ids_list # Pass the list of destination-jobs downstream
class DeleteTablesFn(beam.DoFn):
def __init__(self, test_client=None):
self.test_client = test_client
def start_bundle(self):
self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)
def process(self, table_reference):
_LOGGER.info("Deleting table %s", table_reference)
table_reference = bigquery_tools.parse_table_reference(table_reference)
self.bq_wrapper._delete_table(
table_reference.projectId,
table_reference.datasetId,
table_reference.tableId)
class BigQueryBatchFileLoads(beam.PTransform):
"""Takes in a set of elements, and inserts them to BigQuery via batch loads.
"""
DESTINATION_JOBID_PAIRS = 'destination_load_jobid_pairs'
DESTINATION_FILE_PAIRS = 'destination_file_pairs'
DESTINATION_COPY_JOBID_PAIRS = 'destination_copy_jobid_pairs'
def __init__(
self,
destination,
schema=None,
custom_gcs_temp_location=None,
create_disposition=None,
write_disposition=None,
triggering_frequency=None,
temp_file_format=None,
max_file_size=None,
max_files_per_bundle=None,
max_partition_size=None,
max_files_per_partition=None,
additional_bq_parameters=None,
table_side_inputs=None,
schema_side_inputs=None,
test_client=None,
validate=True,
is_streaming_pipeline=False):
self.destination = destination
self.create_disposition = create_disposition
self.write_disposition = write_disposition
self.triggering_frequency = triggering_frequency
self.max_file_size = max_file_size or _DEFAULT_MAX_FILE_SIZE
self.max_files_per_bundle = (
max_files_per_bundle or _DEFAULT_MAX_WRITERS_PER_BUNDLE)
self.max_partition_size = max_partition_size or _MAXIMUM_LOAD_SIZE
self.max_files_per_partition = (
max_files_per_partition or _MAXIMUM_SOURCE_URIS)
if (isinstance(custom_gcs_temp_location, str) or
custom_gcs_temp_location is None):
self._custom_gcs_temp_location = vp.StaticValueProvider(
str, custom_gcs_temp_location or '')
elif isinstance(custom_gcs_temp_location, vp.ValueProvider):
self._custom_gcs_temp_location = custom_gcs_temp_location
else:
raise ValueError('custom_gcs_temp_location must be str or ValueProvider')
self.test_client = test_client
self.schema = schema
self._temp_file_format = temp_file_format or bigquery_tools.FileFormat.JSON
# If we have multiple destinations, then we will have multiple load jobs,
# thus we will need temporary tables for atomicity.
self.dynamic_destinations = bool(callable(destination))
self.additional_bq_parameters = additional_bq_parameters or {}
self.table_side_inputs = table_side_inputs or ()
self.schema_side_inputs = schema_side_inputs or ()
self.is_streaming_pipeline = is_streaming_pipeline
self._validate = validate
if self._validate:
self.verify()
def verify(self):
if (isinstance(self._custom_gcs_temp_location.get(), vp.StaticValueProvider)
and not self._custom_gcs_temp_location.get().startswith('gs://')):
# Only fail if the custom location is provided, and it is not a GCS
# location.
raise ValueError(
'Invalid GCS location: %r.\n'
'Writing to BigQuery with FILE_LOADS method requires a '
'GCS location to be provided to write files to be '
'loaded into BigQuery. Please provide a GCS bucket, or '
'pass method="STREAMING_INSERTS" to WriteToBigQuery.' %
self._custom_gcs_temp_location.get())
if self.is_streaming_pipeline and not self.triggering_frequency:
raise ValueError(
'triggering_frequency must be specified to use file '
'loads in streaming')
elif not self.is_streaming_pipeline and self.triggering_frequency:
raise ValueError(
'triggering_frequency can only be used with file '
'loads in streaming')
def _window_fn(self):
"""Set the correct WindowInto PTransform"""
# The user-supplied triggering_frequency is often chosen to control how
# many BigQuery load jobs are triggered, to prevent going over BigQuery's
# daily quota for load jobs. If this is set to a large value, currently we
# have to buffer all the data until the trigger fires. Instead we ensure
# that the files are written if a threshold number of records are ready.
# We use only the user-supplied trigger on the actual BigQuery load.
# This allows us to offload the data to the filesystem.
if self.is_streaming_pipeline:
return beam.WindowInto(beam.window.GlobalWindows(),
trigger=trigger.Repeatedly(
trigger.AfterAny(
trigger.AfterProcessingTime(
self.triggering_frequency),
trigger.AfterCount(
_FILE_TRIGGERING_RECORD_COUNT))),
accumulation_mode=trigger.AccumulationMode\
.DISCARDING)
else:
return beam.WindowInto(beam.window.GlobalWindows())
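# Illustrative sketch (hypothetical settings): in a streaming pipeline with
# triggering_frequency=600, the repeated composite trigger above fires a pane
# either after ~10 minutes of processing time or as soon as
# _FILE_TRIGGERING_RECORD_COUNT elements have accumulated, whichever happens
# first, so records are flushed to files without waiting for the full interval.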
def _write_files(self, destination_data_kv_pc, file_prefix_pcv):
outputs = (
destination_data_kv_pc
| beam.ParDo(
WriteRecordsToFile(
schema=self.schema,
max_files_per_bundle=self.max_files_per_bundle,
max_file_size=self.max_file_size,
file_format=self._temp_file_format),
file_prefix_pcv,
*self.schema_side_inputs).with_outputs(
WriteRecordsToFile.UNWRITTEN_RECORD_TAG,
WriteRecordsToFile.WRITTEN_FILE_TAG))
# A PCollection of (destination, file) tuples. It lists files with records,
# and the destination each file is meant to be imported into.
destination_files_kv_pc = outputs[WriteRecordsToFile.WRITTEN_FILE_TAG]
# A PCollection of (destination, record) tuples. These are later sharded,
# grouped, and all records for each destination-shard are written to files.
# This PCollection is necessary because not all records can be written into
# files in ``WriteRecordsToFile``.
unwritten_records_pc = outputs[WriteRecordsToFile.UNWRITTEN_RECORD_TAG]
more_destination_files_kv_pc = (
unwritten_records_pc
| beam.ParDo(_ShardDestinations())
| "GroupShardedRows" >> beam.GroupByKey()
| "DropShardNumber" >> beam.Map(lambda x: (x[0][0], x[1]))
| "WriteGroupedRecordsToFile" >> beam.ParDo(
WriteGroupedRecordsToFile(
schema=self.schema, file_format=self._temp_file_format),
file_prefix_pcv,
*self.schema_side_inputs))
# TODO(BEAM-9494): Remove the identity transform. We flatten both
# PCollection paths and use an identity function to work around a
# flatten optimization issue where the wrong coder is being used.
all_destination_file_pairs_pc = (
(destination_files_kv_pc, more_destination_files_kv_pc)
| "DestinationFilesUnion" >> beam.Flatten()
| "IdentityWorkaround" >> beam.Map(lambda x: x))
if self.is_streaming_pipeline:
# Apply the user's trigger back before we start triggering load jobs
all_destination_file_pairs_pc = (
all_destination_file_pairs_pc
| "ApplyUserTrigger" >> beam.WindowInto(
beam.window.GlobalWindows(),
trigger=trigger.Repeatedly(
trigger.AfterAll(
trigger.AfterProcessingTime(self.triggering_frequency),
trigger.AfterCount(1))),
accumulation_mode=trigger.AccumulationMode.DISCARDING))
return all_destination_file_pairs_pc
def _load_data(
self,
partitions_using_temp_tables,
partitions_direct_to_destination,
load_job_name_pcv,
singleton_pc):
"""Load data to BigQuery
Data is loaded into BigQuery in the following two ways:
1. Single partition:
When there is a single partition of files destined to a single
destination, a single load job is triggered.
2. Multiple partitions and/or Dynamic Destinations:
When there are multiple partitions of files destined for a single
destination or when Dynamic Destinations are used, multiple load jobs
need to be triggered for each partition/destination. Load Jobs are
triggered to temporary tables, and those are later copied to the actual
appropriate destination table. This ensures atomicity when only some
of the load jobs would fail but not others. If any of them fails, then
copy jobs are not triggered.
"""
# Load data using temp tables
trigger_loads_outputs = (
partitions_using_temp_tables
| "TriggerLoadJobsWithTempTables" >> beam.ParDo(
TriggerLoadJobs(
schema=self.schema,
write_disposition=self.write_disposition,
create_disposition=self.create_disposition,
test_client=self.test_client,
temporary_tables=True,
additional_bq_parameters=self.additional_bq_parameters,
source_format=self._temp_file_format),
load_job_name_pcv,
*self.schema_side_inputs).with_outputs(
TriggerLoadJobs.TEMP_TABLES, main='main'))
temp_tables_load_job_ids_pc = trigger_loads_outputs['main']
temp_tables_pc = trigger_loads_outputs[TriggerLoadJobs.TEMP_TABLES]
destination_copy_job_ids_pc = (
singleton_pc
| "WaitForTempTableLoadJobs" >> beam.ParDo(
WaitForBQJobs(self.test_client),
beam.pvalue.AsList(temp_tables_load_job_ids_pc))
| beam.ParDo(
TriggerCopyJobs(
create_disposition=self.create_disposition,
write_disposition=self.write_disposition,
test_client=self.test_client),
load_job_name_pcv))
finished_copy_jobs_pc = (
singleton_pc
| "WaitForCopyJobs" >> beam.ParDo(
WaitForBQJobs(self.test_client),
beam.pvalue.AsList(destination_copy_job_ids_pc)))
_ = (
finished_copy_jobs_pc
| "RemoveTempTables/PassTables" >> beam.FlatMap(
lambda x,
deleting_tables: deleting_tables,
pvalue.AsIter(temp_tables_pc))
| "RemoveTempTables/AddUselessValue" >> beam.Map(lambda x: (x, None))
| "RemoveTempTables/DeduplicateTables" >> beam.GroupByKey()
| "RemoveTempTables/GetTableNames" >> beam.Map(lambda elm: elm[0])
| "RemoveTempTables/Delete" >> beam.ParDo(
DeleteTablesFn(self.test_client)))
# Load data directly to destination table
destination_load_job_ids_pc = (
partitions_direct_to_destination
| "TriggerLoadJobsWithoutTempTables" >> beam.ParDo(
TriggerLoadJobs(
schema=self.schema,
write_disposition=self.write_disposition,
create_disposition=self.create_disposition,
test_client=self.test_client,
temporary_tables=False,
additional_bq_parameters=self.additional_bq_parameters,
source_format=self._temp_file_format),
load_job_name_pcv,
*self.schema_side_inputs))
_ = (
singleton_pc
| "WaitForDestinationLoadJobs" >> beam.ParDo(
WaitForBQJobs(self.test_client),
beam.pvalue.AsList(destination_load_job_ids_pc)))
destination_load_job_ids_pc = (
(temp_tables_load_job_ids_pc, destination_load_job_ids_pc)
| beam.Flatten())
return destination_load_job_ids_pc, destination_copy_job_ids_pc
def expand(self, pcoll):
p = pcoll.pipeline
temp_location = p.options.view_as(GoogleCloudOptions).temp_location
empty_pc = p | "ImpulseEmptyPC" >> beam.Create([])
singleton_pc = p | "ImpulseSingleElementPC" >> beam.Create([None])
load_job_name_pcv = pvalue.AsSingleton(
singleton_pc
| beam.Map(lambda _: _generate_load_job_name()))
file_prefix_pcv = pvalue.AsSingleton(
singleton_pc
| "GenerateFilePrefix" >> beam.Map(
file_prefix_generator(
self._validate, self._custom_gcs_temp_location, temp_location)))
destination_data_kv_pc = (
pcoll
| "RewindowIntoGlobal" >> self._window_fn()
| "AppendDestination" >> beam.ParDo(
bigquery_tools.AppendDestinationsFn(self.destination),
*self.table_side_inputs))
all_destination_file_pairs_pc = self._write_files(
destination_data_kv_pc, file_prefix_pcv)
grouped_files_pc = (
all_destination_file_pairs_pc
| "GroupFilesByTableDestinations" >> beam.GroupByKey())
partitions = (
grouped_files_pc
| beam.ParDo(
PartitionFiles(
self.max_partition_size,
self.max_files_per_partition)).with_outputs(
PartitionFiles.MULTIPLE_PARTITIONS_TAG,
PartitionFiles.SINGLE_PARTITION_TAG))
multiple_partitions_per_destination_pc = partitions[
PartitionFiles.MULTIPLE_PARTITIONS_TAG]
single_partition_per_destination_pc = partitions[
PartitionFiles.SINGLE_PARTITION_TAG]
# When using dynamic destinations, elements with both single as well as
# multiple partitions are loaded into BigQuery using temporary tables to
# ensure atomicity.
if self.dynamic_destinations:
all_partitions = ((
multiple_partitions_per_destination_pc,
single_partition_per_destination_pc)
| "FlattenPartitions" >> beam.Flatten())
destination_load_job_ids_pc, destination_copy_job_ids_pc = self.\
_load_data(all_partitions, empty_pc, load_job_name_pcv,
singleton_pc)
else:
destination_load_job_ids_pc, destination_copy_job_ids_pc = self.\
_load_data(multiple_partitions_per_destination_pc,
single_partition_per_destination_pc,
load_job_name_pcv, singleton_pc)
return {
self.DESTINATION_JOBID_PAIRS: destination_load_job_ids_pc,
self.DESTINATION_FILE_PAIRS: all_destination_file_pairs_pc,
self.DESTINATION_COPY_JOBID_PAIRS: destination_copy_job_ids_pc,
}
|
iemejia/incubator-beam
|
sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
|
Python
|
apache-2.0
| 34,932
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Console proxy."""
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova.console import api as console_api
from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import importutils
from nova import test
CONF = cfg.CONF
CONF.import_opt('console_manager', 'nova.service')
CONF.import_opt('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
"""Test case for console proxy manager."""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
self.console = importutils.import_object(CONF.console_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.host = 'test_compute_host'
def _create_instance(self):
"""Create a test instance."""
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = 1
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)
def test_get_pool_for_instance_host(self):
pool = self.console._get_pool_for_instance_host(self.context,
self.host)
self.assertEqual(pool['compute_host'], self.host)
def test_get_pool_creates_new_pool_if_needed(self):
self.assertRaises(exception.NotFound,
db.console_pool_get_by_host_type,
self.context,
self.host,
self.console.host,
self.console.driver.console_type)
pool = self.console._get_pool_for_instance_host(self.context,
self.host)
pool2 = db.console_pool_get_by_host_type(self.context,
self.host,
self.console.host,
self.console.driver.console_type)
self.assertEqual(pool['id'], pool2['id'])
def test_get_pool_does_not_create_new_pool_if_exists(self):
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass',
'host': self.console.host,
'console_type': self.console.driver.console_type,
'compute_host': 'sometesthostname'}
new_pool = db.console_pool_create(self.context, pool_info)
pool = self.console._get_pool_for_instance_host(self.context,
'sometesthostname')
self.assertEqual(pool['id'], new_pool['id'])
def test_add_console(self):
instance = self._create_instance()
self.console.add_console(self.context, instance['id'])
instance = db.instance_get(self.context, instance['id'])
pool = db.console_pool_get_by_host_type(self.context,
instance['host'], self.console.host,
self.console.driver.console_type)
console_instances = [con['instance_uuid'] for con in pool['consoles']]
self.assertIn(instance['uuid'], console_instances)
db.instance_destroy(self.context, instance['uuid'])
def test_add_console_does_not_duplicate(self):
instance = self._create_instance()
cons1 = self.console.add_console(self.context, instance['id'])
cons2 = self.console.add_console(self.context, instance['id'])
self.assertEqual(cons1, cons2)
db.instance_destroy(self.context, instance['uuid'])
def test_remove_console(self):
instance = self._create_instance()
console_id = self.console.add_console(self.context, instance['id'])
self.console.remove_console(self.context, console_id)
self.assertRaises(exception.NotFound,
db.console_get,
self.context,
console_id)
db.instance_destroy(self.context, instance['uuid'])
class ConsoleAPITestCase(test.TestCase):
"""Test case for console API."""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.console_api = console_api.API()
self.fake_uuid = '00000000-aaaa-bbbb-cccc-000000000000'
self.fake_instance = {
'id': 1,
'uuid': self.fake_uuid,
'host': 'fake_host'
}
self.fake_console = {
'pool': {'host': 'fake_host'},
'id': 'fake_id'
}
def _fake_db_console_get(_ctxt, _console_uuid, _instance_uuid):
return self.fake_console
self.stubs.Set(db, 'console_get', _fake_db_console_get)
def _fake_db_console_get_all_by_instance(_ctxt, _instance_uuid,
columns_to_join):
return [self.fake_console]
self.stubs.Set(db, 'console_get_all_by_instance',
_fake_db_console_get_all_by_instance)
def _fake_instance_get_by_uuid(_ctxt, _instance_uuid):
return self.fake_instance
self.stubs.Set(db, 'instance_get_by_uuid', _fake_instance_get_by_uuid)
def test_get_consoles(self):
console = self.console_api.get_consoles(self.context, self.fake_uuid)
self.assertEqual(console, [self.fake_console])
def test_get_console(self):
console = self.console_api.get_console(self.context, self.fake_uuid,
'fake_id')
self.assertEqual(console, self.fake_console)
def test_delete_console(self):
self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'remove_console')
console_rpcapi.ConsoleAPI.remove_console(self.context, 'fake_id')
self.mox.ReplayAll()
self.console_api.delete_console(self.context, self.fake_uuid,
'fake_id')
def test_create_console(self):
self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI,
'get_console_topic')
compute_rpcapi.ComputeAPI.get_console_topic(
self.context, 'fake_host').AndReturn('compute.fake_host')
self.mox.StubOutClassWithMocks(console_rpcapi, 'ConsoleAPI')
console_api_mock = console_rpcapi.ConsoleAPI(
topic='compute.fake_host')
console_api_mock.add_console(self.context,
self.fake_instance['id'])
self.mox.ReplayAll()
self.console_api.create_console(self.context, self.fake_uuid)
|
sacharya/nova
|
nova/tests/console/test_console.py
|
Python
|
apache-2.0
| 7,724
|
import pandas as pd
from base import filter_test, ConstantDataSource
def test_simple_filter():
df = pd.DataFrame(
{
't1': [1, 2, 3, 4, 5],
't2': [-1, -2, -3, 5, 7]
}
)
result_df = pd.DataFrame(
{
't4': [2, 4],
't5': [-1, -4],
't6': [0, 0]
}
).sort_index(axis=1)
filter_test.append(ConstantDataSource, construct_arguments=[df])
output_df = filter_test.process(output_channels=['root']).sort_index(axis=1)
assert output_df.equals(result_df)
|
SirEdvin/Pandas-Pipe
|
tests/filter_test.py
|
Python
|
apache-2.0
| 561
|
# Copyright 2013 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
@test.attr(type='gate')
def test_project_create_with_description(self):
# Create project with a description
project_name = data_utils.rand_name('project-')
project_desc = data_utils.rand_name('desc-')
project = self.client.create_project(
project_name, description=project_desc)
self.data.projects.append(project)
project_id = project['id']
desc1 = project['description']
self.assertEqual(desc1, project_desc, 'Description should have '
'been sent in response for create')
body = self.client.get_project(project_id)
desc2 = body['description']
self.assertEqual(desc2, project_desc, 'Description does not appear '
'to be set')
@test.attr(type='gate')
def test_project_create_with_domain(self):
# Create project with a domain
self.data.setup_test_domain()
project_name = data_utils.rand_name('project')
project = self.client.create_project(
project_name, domain_id=self.data.domain['id'])
self.data.projects.append(project)
project_id = project['id']
self.assertEqual(project_name, project['name'])
self.assertEqual(self.data.domain['id'], project['domain_id'])
body = self.client.get_project(project_id)
self.assertEqual(project_name, body['name'])
self.assertEqual(self.data.domain['id'], body['domain_id'])
@test.attr(type='gate')
def test_project_create_enabled(self):
# Create a project that is enabled
project_name = data_utils.rand_name('project-')
project = self.client.create_project(
project_name, enabled=True)
self.data.projects.append(project)
project_id = project['id']
en1 = project['enabled']
self.assertTrue(en1, 'Enable should be True in response')
body = self.client.get_project(project_id)
en2 = body['enabled']
self.assertTrue(en2, 'Enable should be True in lookup')
@test.attr(type='gate')
def test_project_create_not_enabled(self):
# Create a project that is not enabled
project_name = data_utils.rand_name('project-')
project = self.client.create_project(
project_name, enabled=False)
self.data.projects.append(project)
en1 = project['enabled']
self.assertEqual('false', str(en1).lower(),
'Enable should be False in response')
body = self.client.get_project(project['id'])
en2 = body['enabled']
self.assertEqual('false', str(en2).lower(),
'Enable should be False in lookup')
@test.attr(type='gate')
def test_project_update_name(self):
# Update name attribute of a project
p_name1 = data_utils.rand_name('project-')
project = self.client.create_project(p_name1)
self.data.projects.append(project)
resp1_name = project['name']
p_name2 = data_utils.rand_name('project2-')
body = self.client.update_project(project['id'], name=p_name2)
resp2_name = body['name']
self.assertNotEqual(resp1_name, resp2_name)
body = self.client.get_project(project['id'])
resp3_name = body['name']
self.assertNotEqual(resp1_name, resp3_name)
self.assertEqual(p_name1, resp1_name)
self.assertEqual(resp2_name, resp3_name)
@test.attr(type='gate')
def test_project_update_desc(self):
# Update description attribute of a project
p_name = data_utils.rand_name('project-')
p_desc = data_utils.rand_name('desc-')
project = self.client.create_project(
p_name, description=p_desc)
self.data.projects.append(project)
resp1_desc = project['description']
p_desc2 = data_utils.rand_name('desc2-')
body = self.client.update_project(
project['id'], description=p_desc2)
resp2_desc = body['description']
self.assertNotEqual(resp1_desc, resp2_desc)
body = self.client.get_project(project['id'])
resp3_desc = body['description']
self.assertNotEqual(resp1_desc, resp3_desc)
self.assertEqual(p_desc, resp1_desc)
self.assertEqual(resp2_desc, resp3_desc)
@test.attr(type='gate')
def test_project_update_enable(self):
# Update the enabled attribute of a project
p_name = data_utils.rand_name('project-')
p_en = False
project = self.client.create_project(p_name, enabled=p_en)
self.data.projects.append(project)
resp1_en = project['enabled']
p_en2 = True
body = self.client.update_project(
project['id'], enabled=p_en2)
resp2_en = body['enabled']
self.assertNotEqual(resp1_en, resp2_en)
body = self.client.get_project(project['id'])
resp3_en = body['enabled']
self.assertNotEqual(resp1_en, resp3_en)
self.assertEqual('false', str(resp1_en).lower())
self.assertEqual(resp2_en, resp3_en)
@test.attr(type='gate')
def test_associate_user_to_project(self):
# Associate a user to a project
# Create a Project
p_name = data_utils.rand_name('project-')
project = self.client.create_project(p_name)
self.data.projects.append(project)
# Create a User
u_name = data_utils.rand_name('user-')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
u_password = data_utils.rand_name('pass-')
user = self.client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=project['id'])
# Delete the User at the end of this method
self.addCleanup(self.client.delete_user, user['id'])
# Get User To validate the user details
new_user_get = self.client.get_user(user['id'])
# Assert response body of GET
self.assertEqual(u_name, new_user_get['name'])
self.assertEqual(u_desc, new_user_get['description'])
self.assertEqual(project['id'],
new_user_get['project_id'])
self.assertEqual(u_email, new_user_get['email'])
|
Vaidyanath/tempest
|
tempest/api/identity/admin/v3/test_projects.py
|
Python
|
apache-2.0
| 7,060
|
from nets import vgg
import tensorflow as tf
from preprocessing import vgg_preprocessing
from ..utils.upsampling import bilinear_upsample_weights
slim = tf.contrib.slim
# Mean values for VGG-16
from preprocessing.vgg_preprocessing import _R_MEAN, _G_MEAN, _B_MEAN
def FCN_16s(image_batch_tensor,
number_of_classes,
is_training):
"""Returns the FCN-16s model definition.
The function returns the model definition of a network that was described
in 'Fully Convolutional Networks for Semantic Segmentation' by Long et al.
The network subsamples the input by a factor of 32 and uses two bilinear
upsampling layers to upsample prediction by a factor of 32. This means that
if the image size is not a multiple of 32, a prediction of a different size
will be delivered. To adapt the network for an input of any size, use
adapt_network_for_any_size_input(FCN_16s, 32). Note: the upsampling kernel
is fixed in this model definition, because learning it didn't give significant
improvements according to the aforementioned paper.
Parameters
----------
image_batch_tensor : [batch_size, height, width, depth] Tensor
Tensor specifying input image batch
number_of_classes : int
An argument specifying the number of classes to be predicted.
For example, for PASCAL VOC it is 21.
is_training : boolean
An argument specifying if the network is being evaluated or trained.
It affects the behaviour of the underlying dropout layer of VGG-16.
Returns
-------
upsampled_logits : [batch_size, height, width, number_of_classes] Tensor
Tensor with logits representing predictions for each class.
Be careful, the output can be of different size compared to input,
use adapt_network_for_any_size_input to adapt network for any input size.
Otherwise, the input image sizes should be a multiple of 32.
fcn_32s_variables_mapping : dict {string: variable}
Dict which maps the FCN-16s model's variables to FCN-32s checkpoint variable
names. We need this to initialize the weights of the FCN-16s model from an
FCN-32s checkpoint file. Look at the ipython notebook for examples.
"""
# Convert image to float32 before subtracting the
# mean pixel value
image_batch_float = tf.to_float(image_batch_tensor)
# Subtract the mean pixel value from each pixel
mean_centered_image_batch = image_batch_float - [_R_MEAN, _G_MEAN, _B_MEAN]
upsample_filter_factor_2_np = bilinear_upsample_weights(factor=2,
number_of_classes=number_of_classes)
upsample_filter_factor_16_np = bilinear_upsample_weights(factor=16,
number_of_classes=number_of_classes)
upsample_filter_factor_2_tensor = tf.constant(upsample_filter_factor_2_np)
upsample_filter_factor_16_tensor = tf.constant(upsample_filter_factor_16_np)
with tf.variable_scope("fcn_16s") as fcn_16s_scope:
# Define the model that we want to use -- specify to use only two classes at the last layer
# TODO: make pull request to get this custom vgg feature accepted
# to avoid using custom slim repo.
with slim.arg_scope(vgg.vgg_arg_scope()):
## Original FCN-32s model definition
last_layer_logits, end_points = vgg.vgg_16(mean_centered_image_batch,
num_classes=number_of_classes,
is_training=is_training,
spatial_squeeze=False,
fc_conv_padding='SAME')
last_layer_logits_shape = tf.shape(last_layer_logits)
# Calculate the output size of the upsampled tensor
last_layer_upsampled_by_factor_2_logits_shape = tf.pack([
last_layer_logits_shape[0],
last_layer_logits_shape[1] * 2,
last_layer_logits_shape[2] * 2,
last_layer_logits_shape[3]
])
# Perform the upsampling
last_layer_upsampled_by_factor_2_logits = tf.nn.conv2d_transpose(last_layer_logits,
upsample_filter_factor_2_tensor,
output_shape=last_layer_upsampled_by_factor_2_logits_shape,
strides=[1, 2, 2, 1])
## Adding the skip here for FCN-16s model
pool4_features = end_points['fcn_16s/vgg_16/pool4']
# We zero-initialize the weights so that training starts with the same
# accuracy at which FCN-32s training ended.
pool4_logits = slim.conv2d(pool4_features,
number_of_classes,
[1, 1],
activation_fn=None,
normalizer_fn=None,
weights_initializer=tf.zeros_initializer,
scope='pool4_fc')
fused_last_layer_and_pool4_logits = pool4_logits + last_layer_upsampled_by_factor_2_logits
fused_last_layer_and_pool4_logits_shape = tf.shape(fused_last_layer_and_pool4_logits)
# Calculate the output size of the upsampled tensor
fused_last_layer_and_pool4_upsampled_by_factor_16_logits_shape = tf.pack([
fused_last_layer_and_pool4_logits_shape[0],
fused_last_layer_and_pool4_logits_shape[1] * 16,
fused_last_layer_and_pool4_logits_shape[2] * 16,
fused_last_layer_and_pool4_logits_shape[3]
])
# Perform the upsampling
fused_last_layer_and_pool4_upsampled_by_factor_16_logits = tf.nn.conv2d_transpose(fused_last_layer_and_pool4_logits,
upsample_filter_factor_16_tensor,
output_shape=fused_last_layer_and_pool4_upsampled_by_factor_16_logits_shape,
strides=[1, 16, 16, 1])
fcn_32s_variables_mapping = {}
fcn_16s_variables = slim.get_variables(fcn_16s_scope)
for variable in fcn_16s_variables:
# We only need FCN-32s variables to restore from the checkpoint;
# the new FCN-16s variables (pool4_fc) are initialized from scratch.
if 'pool4_fc' in variable.name:
continue
# Here we remove the part of a name of the variable
# that is responsible for the current variable scope
original_fcn_32s_checkpoint_string = 'fcn_32s/' + variable.name[len(fcn_16s_scope.original_name_scope):-2]
fcn_32s_variables_mapping[original_fcn_32s_checkpoint_string] = variable
return fused_last_layer_and_pool4_upsampled_by_factor_16_logits, fcn_32s_variables_mapping
|
aakashsinha19/Aspectus
|
Image Segmentation/tf-image-segmentation/tf_image_segmentation/models/fcn_16s.py
|
Python
|
apache-2.0
| 7,835
|
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2015, 2016 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio.post_policy
~~~~~~~~~~~~~~~
This module contains :class:`PostPolicy <PostPolicy>` implementation.
:copyright: (c) 2015 by Minio, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
import base64
import json
import datetime
from .helpers import (is_non_empty_string, is_valid_bucket_name)
from .error import InvalidArgumentError
# Policy explanation:
# http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
class PostPolicy(object):
"""
A :class:`PostPolicy <PostPolicy>` object for constructing
Amazon S3 POST policy JSON string.
"""
def __init__(self):
self._expiration = None
self._content_length_range = tuple()
# publicly accessible
self.policies = []
self.form_data = dict()
self.bucket_name = ''
self.key = ''
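# Illustrative usage sketch (hypothetical bucket, key prefix and expiry):
#   policy = PostPolicy()
#   policy.set_bucket_name('my-bucket')
#   policy.set_key_startswith('uploads/')
#   policy.set_expires(datetime.datetime.utcnow() +
#                      datetime.timedelta(days=1))
#   encoded = policy.base64()  # base64-encoded policy for the POST form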
def set_expires(self, time):
"""
Set expiration time :class:`datetime.datetime`.
:param time: set expiration :class:`datetime.datetime`.
"""
if time.toordinal() < 1:
raise ValueError()
self._expiration = time
def set_key(self, key):
"""
Set key policy condition.
:param key: set key name.
"""
is_non_empty_string(key)
self.policies.append(('eq', '$key', key))
self.form_data['key'] = key
self.key = key
def set_key_startswith(self, key_startswith):
"""
Set key startswith policy condition.
:param key_startswith: set key prefix name.
"""
is_non_empty_string(key_startswith)
self.policies.append(('starts-with', '$key', key_startswith))
self.form_data['key'] = key_startswith
def set_bucket_name(self, bucket_name):
"""
Set bucket name policy condition.
:param bucket_name: set bucket name.
"""
is_valid_bucket_name(bucket_name)
self.policies.append(('eq', '$bucket', bucket_name))
self.form_data['bucket'] = bucket_name
self.bucket_name = bucket_name
def set_content_type(self, content_type):
"""
Set content-type policy condition.
:param content_type: set content type name.
"""
self.policies.append(('eq', '$Content-Type', content_type))
self.form_data['Content-Type'] = content_type
def set_content_length_range(self, min_length, max_length):
"""
Set content length range policy condition.
Raise :exc:`ValueError` for invalid inputs.
:param min_length: Minimum length limit for content size.
:param max_length: Maximum length limit for content size.
"""
err_msg = ('Min-length ({}) must be <= Max-length ({}), '
'and they must be non-negative.').format(
min_length, max_length
)
if min_length > max_length or min_length < 0 or max_length < 0:
raise ValueError(err_msg)
self._content_length_range = (min_length, max_length)
def append_policy(self, condition, target, value):
self.policies.append([condition, target, value])
def _marshal_json(self, extras=()):
"""
Marshal various policies into json str/bytes.
"""
policies = self.policies[:]
policies.extend(extras)
if self._content_length_range:
policies.append(['content-length-range'] +
list(self._content_length_range))
policy_stmt = {
"expiration": self._expiration.strftime(
"%Y-%m-%dT%H:%M:%S.000Z"),
}
if len(policies) > 0:
policy_stmt["conditions"] = policies
return json.dumps(policy_stmt)
def base64(self, extras=()):
"""
Encode json into base64.
"""
s = self._marshal_json(extras=extras)
s_bytes = s if isinstance(s, bytes) else s.encode('utf-8')
b64enc = base64.b64encode(s_bytes)
return b64enc.decode('utf-8') if isinstance(b64enc, bytes) else b64enc
def is_valid(self):
"""
Validate for required parameters.
"""
if not isinstance(self._expiration, datetime.datetime):
raise InvalidArgumentError('Expiration datetime must be specified.')
if 'key' not in self.form_data:
raise InvalidArgumentError('object key must be specified.')
if 'bucket' not in self.form_data:
raise InvalidArgumentError('bucket name must be specified.')
|
NitishT/minio-py
|
minio/post_policy.py
|
Python
|
apache-2.0
| 5,211
|
import json
import logging
import os
from numbers import Number
from typing import Any, Dict, List, Optional, Tuple
from ray.tune.utils import flatten_dict
try:
import pandas as pd
from pandas import DataFrame
except ImportError:
pd = None
DataFrame = None
from ray.tune.error import TuneError
from ray.tune.result import EXPR_PROGRESS_FILE, EXPR_PARAM_FILE,\
CONFIG_PREFIX, TRAINING_ITERATION
from ray.tune.trial import Trial
from ray.tune.trainable import TrainableUtil
logger = logging.getLogger(__name__)
class Analysis:
"""Analyze all results from a directory of experiments.
To use this class, the experiment must be executed with the JsonLogger.
Args:
experiment_dir (str): Directory of the experiment to load.
default_metric (str): Default metric for comparing results. Can be
overwritten with the ``metric`` parameter in the respective
functions.
default_mode (str): Default mode for comparing results. Has to be one
of [min, max]. Can be overwritten with the ``mode`` parameter
in the respective functions.
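Example (illustrative sketch; assumes trial results were already written
by the JsonLogger to a hypothetical local directory):
>>> analysis = Analysis("~/tune_results/my_exp")
>>> df = analysis.dataframe(metric="mean_accuracy", mode="max")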
"""
def __init__(self,
experiment_dir: str,
default_metric: Optional[str] = None,
default_mode: Optional[str] = None):
experiment_dir = os.path.expanduser(experiment_dir)
if not os.path.isdir(experiment_dir):
raise ValueError(
"{} is not a valid directory.".format(experiment_dir))
self._experiment_dir = experiment_dir
self._configs = {}
self._trial_dataframes = {}
self.default_metric = default_metric
if default_mode and default_mode not in ["min", "max"]:
raise ValueError(
"`default_mode` has to be None or one of [min, max]")
self.default_mode = default_mode
if not pd:
logger.warning(
"pandas not installed. Run `pip install pandas` for "
"Analysis utilities.")
else:
self.fetch_trial_dataframes()
def _validate_metric(self, metric: str) -> str:
if not metric and not self.default_metric:
raise ValueError(
"No `metric` has been passed and `default_metric` has "
"not been set. Please specify the `metric` parameter.")
return metric or self.default_metric
def _validate_mode(self, mode: str) -> str:
if not mode and not self.default_mode:
raise ValueError(
"No `mode` has been passed and `default_mode` has "
"not been set. Please specify the `mode` parameter.")
if mode and mode not in ["min", "max"]:
raise ValueError("If set, `mode` has to be one of [min, max]")
return mode or self.default_mode
def dataframe(self,
metric: Optional[str] = None,
mode: Optional[str] = None) -> DataFrame:
"""Returns a pandas.DataFrame object constructed from the trials.
Args:
metric (str): Key for trial info to order on.
If None, uses last result.
mode (str): One of [min, max].
Returns:
pd.DataFrame: Constructed from a result dict of each trial.
"""
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs(prefix=True)
for path, config in all_configs.items():
if path in rows:
rows[path].update(config)
rows[path].update(logdir=path)
return pd.DataFrame(list(rows.values()))
def get_best_config(self,
metric: Optional[str] = None,
mode: Optional[str] = None) -> Optional[Dict]:
"""Retrieve the best config corresponding to the trial.
Args:
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to
``self.default_mode``.
"""
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)
rows = self._retrieve_rows(metric=metric, mode=mode)
if not rows:
# only nans encountered when retrieving rows
logger.warning("Not able to retrieve the best config for {} "
"according to the specified metric "
"(only nans encountered).".format(
self._experiment_dir))
return None
all_configs = self.get_all_configs()
compare_op = max if mode == "max" else min
best_path = compare_op(rows, key=lambda k: rows[k][metric])
return all_configs[best_path]
def get_best_logdir(self,
metric: Optional[str] = None,
mode: Optional[str] = None) -> Optional[str]:
"""Retrieve the logdir corresponding to the best trial.
Args:
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
"""
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)
assert mode in ["max", "min"]
df = self.dataframe(metric=metric, mode=mode)
mode_idx = pd.Series.idxmax if mode == "max" else pd.Series.idxmin
try:
return df.iloc[mode_idx(df[metric])].logdir
except KeyError:
# all dirs contains only nan values
# for the specified metric
# -> df is an empty dataframe
logger.warning("Not able to retrieve the best logdir for {} "
"according to the specified metric "
"(only nans encountered).".format(
self._experiment_dir))
return None
def fetch_trial_dataframes(self) -> Dict[str, DataFrame]:
fail_count = 0
for path in self._get_trial_paths():
try:
self.trial_dataframes[path] = pd.read_csv(
os.path.join(path, EXPR_PROGRESS_FILE))
except Exception:
fail_count += 1
if fail_count:
logger.debug(
"Couldn't read results from {} paths".format(fail_count))
return self.trial_dataframes
def get_all_configs(self, prefix: bool = False) -> Dict[str, Dict]:
"""Returns a list of all configurations.
Args:
prefix (bool): If True, flattens the config dict
and prepends `config/`.
Returns:
Dict[str, Dict]: Dict of all configurations of trials, indexed by
their trial dir.
"""
fail_count = 0
for path in self._get_trial_paths():
try:
with open(os.path.join(path, EXPR_PARAM_FILE)) as f:
config = json.load(f)
if prefix:
for k in list(config):
config[CONFIG_PREFIX + k] = config.pop(k)
self._configs[path] = config
except Exception:
fail_count += 1
if fail_count:
logger.warning(
"Couldn't read config from {} paths".format(fail_count))
return self._configs
def get_trial_checkpoints_paths(self,
trial: Trial,
metric: Optional[str] = None
) -> List[Tuple[str, Number]]:
"""Gets paths and metrics of all persistent checkpoints of a trial.
Args:
trial (Trial): The log directory of a trial, or a trial instance.
metric (str): key for trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default if no value was
passed to ``self.default_metric``.
Returns:
List of [path, metric] for all persistent checkpoints of the trial.
"""
metric = metric or self.default_metric or TRAINING_ITERATION
if isinstance(trial, str):
trial_dir = os.path.expanduser(trial)
# Get checkpoints from logdir.
chkpt_df = TrainableUtil.get_checkpoints_paths(trial_dir)
# Join with trial dataframe to get metrics.
trial_df = self.trial_dataframes[trial_dir]
path_metric_df = chkpt_df.merge(
trial_df, on="training_iteration", how="inner")
return path_metric_df[["chkpt_path", metric]].values.tolist()
elif isinstance(trial, Trial):
checkpoints = trial.checkpoint_manager.best_checkpoints()
return [(c.value, c.result[metric]) for c in checkpoints]
else:
raise ValueError("trial should be a string or a Trial instance.")
def get_best_checkpoint(self,
trial: Trial,
metric: Optional[str] = None,
mode: Optional[str] = None) -> Optional[str]:
"""Gets best persistent checkpoint path of provided trial.
Args:
trial (Trial): The log directory of a trial, or a trial instance.
metric (str): key of trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default if no value was
passed to ``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
Returns:
Path for best checkpoint of trial determined by metric
"""
metric = metric or self.default_metric or TRAINING_ITERATION
mode = self._validate_mode(mode)
checkpoint_paths = self.get_trial_checkpoints_paths(trial, metric)
if not checkpoint_paths:
logger.error(f"No checkpoints have been found for trial {trial}.")
return None
if mode == "max":
return max(checkpoint_paths, key=lambda x: x[1])[0]
else:
return min(checkpoint_paths, key=lambda x: x[1])[0]
def _retrieve_rows(self,
metric: Optional[str] = None,
mode: Optional[str] = None) -> Dict[str, Any]:
assert mode is None or mode in ["max", "min"]
rows = {}
for path, df in self.trial_dataframes.items():
if mode == "max":
idx = df[metric].idxmax()
elif mode == "min":
idx = df[metric].idxmin()
else:
idx = -1
try:
rows[path] = df.iloc[idx].to_dict()
except TypeError:
# idx is nan
logger.warning(
"Warning: Non-numerical value(s) encountered for {}".
format(path))
return rows
def _get_trial_paths(self) -> List[str]:
_trial_paths = []
for trial_path, _, files in os.walk(self._experiment_dir):
if EXPR_PROGRESS_FILE in files:
_trial_paths += [trial_path]
if not _trial_paths:
raise TuneError("No trials found in {}.".format(
self._experiment_dir))
return _trial_paths
@property
def trial_dataframes(self) -> Dict[str, DataFrame]:
"""List of all dataframes of the trials."""
return self._trial_dataframes
class ExperimentAnalysis(Analysis):
"""Analyze results from a Tune experiment.
To use this class, the experiment must be executed with the JsonLogger.
Parameters:
experiment_checkpoint_path (str): Path to a json file
representing an experiment state. Corresponds to
Experiment.local_dir/Experiment.name/experiment_state.json
trials (list|None): List of trials that can be accessed via
`analysis.trials`.
default_metric (str): Default metric for comparing results. Can be
overwritten with the ``metric`` parameter in the respective
functions.
default_mode (str): Default mode for comparing results. Has to be one
of [min, max]. Can be overwritten with the ``mode`` parameter
in the respective functions.
Example:
>>> tune.run(my_trainable, name="my_exp", local_dir="~/tune_results")
>>> analysis = ExperimentAnalysis(
>>> experiment_checkpoint_path="~/tune_results/my_exp/state.json")
"""
def __init__(self,
experiment_checkpoint_path: str,
trials: Optional[List[Trial]] = None,
default_metric: Optional[str] = None,
default_mode: Optional[str] = None):
experiment_checkpoint_path = os.path.expanduser(
experiment_checkpoint_path)
if not os.path.isfile(experiment_checkpoint_path):
raise ValueError(
"{} is not a valid file.".format(experiment_checkpoint_path))
with open(experiment_checkpoint_path) as f:
_experiment_state = json.load(f)
self._experiment_state = _experiment_state
if "checkpoints" not in _experiment_state:
raise TuneError("Experiment state invalid; no checkpoints found.")
self._checkpoints = _experiment_state["checkpoints"]
self.trials = trials
super(ExperimentAnalysis, self).__init__(
os.path.dirname(experiment_checkpoint_path), default_metric,
default_mode)
@property
def best_trial(self) -> Trial:
"""Get the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode, scope)` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_trial`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_trial(metric, mode)` method to set the metric "
"and mode explicitly.")
return self.get_best_trial(self.default_metric, self.default_mode)
@property
def best_config(self) -> Dict:
"""Get the config of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_config(metric, mode, scope)` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_config`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_config(metric, mode)` method to set the metric "
"and mode explicitly.")
return self.get_best_config(self.default_metric, self.default_mode)
@property
def best_checkpoint(self) -> str:
"""Get the checkpoint path of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_checkpoint(trial, metric, mode)` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_checkpoint`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_checkpoint(trial, metric, mode)` method to set the "
"metric and mode explicitly.")
best_trial = self.best_trial
return self.get_best_checkpoint(best_trial, self.default_metric,
self.default_mode)
@property
def best_logdir(self) -> str:
"""Get the logdir of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_logdir(metric, mode)` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_logdir`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use the "
"`get_best_logdir(metric, mode, scope)` method to set the "
"metric and mode explicitly.")
return self.get_best_logdir(self.default_metric, self.default_mode)
@property
def best_dataframe(self) -> DataFrame:
"""Get the full result dataframe of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_logdir(metric, mode)` and use it to look for the dataframe
in the `self.trial_dataframes` dict.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_result`, pass a `metric` and `mode` "
"parameter to `tune.run()`.")
best_logdir = self.best_logdir
return self.trial_dataframes[best_logdir]
@property
def best_result(self) -> Dict:
"""Get the last result of the best trial of the experiment
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode, scope).last_result` instead.
"""
if not self.default_metric or not self.default_mode:
raise ValueError(
"To fetch the `best_result`, pass a `metric` and `mode` "
"parameter to `tune.run()`. Alternatively, use "
"`get_best_trial(metric, mode).last_result` to set "
"the metric and mode explicitly and fetch the last result.")
return self.best_trial.last_result
@property
def best_result_df(self) -> DataFrame:
"""Get the best result of the experiment as a pandas dataframe.
The best trial is determined by comparing the last trial results
using the `metric` and `mode` parameters passed to `tune.run()`.
If you didn't pass these parameters, use
`get_best_trial(metric, mode, scope).last_result` instead.
"""
if not pd:
raise ValueError("`best_result_df` requires pandas. Install with "
"`pip install pandas`.")
best_result = flatten_dict(self.best_result, delimiter=".")
return pd.DataFrame.from_records([best_result], index="trial_id")
@property
def results(self) -> Dict[str, Dict]:
"""Get the last result of the all trials of the experiment"""
return {trial.trial_id: trial.last_result for trial in self.trials}
@property
    def results_df(self) -> DataFrame:
        """Get the last result of all trials as a pandas dataframe."""
        if not pd:
            raise ValueError("`results_df` requires pandas. Install with "
"`pip install pandas`.")
return pd.DataFrame.from_records(
[
flatten_dict(trial.last_result, delimiter=".")
for trial in self.trials
],
index="trial_id")
def get_best_trial(self,
metric: Optional[str] = None,
mode: Optional[str] = None,
scope: str = "last") -> Optional[Trial]:
"""Retrieve the best trial object.
Compares all trials' scores on ``metric``.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If `mode` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.
Args:
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
scope (str): One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
If `scope=avg`, consider the simple average over all steps
for `metric` and compare across trials based on
`mode=[min,max]`. If `scope=last-5-avg` or `scope=last-10-avg`,
consider the simple average over the last 5 or 10 steps for
`metric` and compare across trials based on `mode=[min,max]`.
If `scope=all`, find each trial's min/max score for `metric`
based on `mode`, and compare trials based on `mode=[min,max]`.
"""
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)
if scope not in ["all", "last", "avg", "last-5-avg", "last-10-avg"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for scope {} not in [\"all\", \"last\", \"avg\", "
"\"last-5-avg\", \"last-10-avg\"]. "
"If you didn't pass a `metric` parameter to `tune.run()`, "
"you have to pass one when fetching the best trial.".format(
metric, scope))
best_trial = None
best_metric_score = None
for trial in self.trials:
if metric not in trial.metric_analysis:
continue
if scope in ["last", "avg", "last-5-avg", "last-10-avg"]:
metric_score = trial.metric_analysis[metric][scope]
else:
metric_score = trial.metric_analysis[metric][mode]
if best_metric_score is None:
best_metric_score = metric_score
best_trial = trial
continue
if (mode == "max") and (best_metric_score < metric_score):
best_metric_score = metric_score
best_trial = trial
elif (mode == "min") and (best_metric_score > metric_score):
best_metric_score = metric_score
best_trial = trial
if not best_trial:
logger.warning(
"Could not find best trial. Did you pass the correct `metric`"
"parameter?")
return best_trial
def get_best_config(self,
metric: Optional[str] = None,
mode: Optional[str] = None,
scope: str = "last") -> Optional[Dict]:
"""Retrieve the best config corresponding to the trial.
Compares all trials' scores on `metric`.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If `mode` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.
Args:
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
scope (str): One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
If `scope=avg`, consider the simple average over all steps
for `metric` and compare across trials based on
`mode=[min,max]`. If `scope=last-5-avg` or `scope=last-10-avg`,
consider the simple average over the last 5 or 10 steps for
`metric` and compare across trials based on `mode=[min,max]`.
If `scope=all`, find each trial's min/max score for `metric`
based on `mode`, and compare trials based on `mode=[min,max]`.
"""
best_trial = self.get_best_trial(metric, mode, scope)
return best_trial.config if best_trial else None
def get_best_logdir(self,
metric: Optional[str] = None,
mode: Optional[str] = None,
scope: str = "last") -> Optional[str]:
"""Retrieve the logdir corresponding to the best trial.
Compares all trials' scores on `metric`.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If `mode` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.
Args:
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
scope (str): One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
If `scope=avg`, consider the simple average over all steps
for `metric` and compare across trials based on
`mode=[min,max]`. If `scope=last-5-avg` or `scope=last-10-avg`,
consider the simple average over the last 5 or 10 steps for
`metric` and compare across trials based on `mode=[min,max]`.
If `scope=all`, find each trial's min/max score for `metric`
based on `mode`, and compare trials based on `mode=[min,max]`.
"""
best_trial = self.get_best_trial(metric, mode, scope)
return best_trial.logdir if best_trial else None
def stats(self) -> Dict:
"""Returns a dictionary of the statistics of the experiment."""
return self._experiment_state.get("stats")
def runner_data(self) -> Dict:
"""Returns a dictionary of the TrialRunner data."""
return self._experiment_state.get("runner_data")
def _get_trial_paths(self) -> List[str]:
"""Overwrites Analysis to only have trials of one experiment."""
if self.trials:
_trial_paths = [t.logdir for t in self.trials]
else:
logger.warning("No `self.trials`. Drawing logdirs from checkpoint "
"file. This may result in some information that is "
"out of sync, as checkpointing is periodic.")
_trial_paths = [
checkpoint["logdir"] for checkpoint in self._checkpoints
]
if not _trial_paths:
raise TuneError("No trials found.")
return _trial_paths
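# Illustrative usage sketch. A minimal, hypothetical example assuming an
# experiment named "my_exp" was run with
# tune.run(..., local_dir="~/tune_results") and reported a "loss" metric:
#
#   analysis = ExperimentAnalysis(
#       "~/tune_results/my_exp/experiment_state.json")
#   best_trial = analysis.get_best_trial(metric="loss", mode="min")
#   best_config = analysis.get_best_config(metric="loss", mode="min")
#   best_ckpt = analysis.get_best_checkpoint(
#       best_trial, metric="loss", mode="min")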
|
robertnishihara/ray
|
python/ray/tune/analysis/experiment_analysis.py
|
Python
|
apache-2.0
| 27,440
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Copyright (C) 2017 Chuan Ji <jichu4n@gmail.com> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""Sender Rewriting Scheme (SRS) library for Python."""
from typing import List, Tuple, Union
import base64
import datetime
import hashlib
import hmac
import re
import string
import time
class Error(Exception):
"""Base class for SRS errors."""
class InvalidAddressError(Error, ValueError):
"""Invalid email address."""
class InvalidHashError(Error):
"""Invalid hash in an SRS address."""
class InvalidTimestampError(Error):
"""Invalid timestamp in an SRS address."""
class SRS(object):
"""A Sender Rewriting Scheme (SRS) instance.
This class implements the Guarded scheme described in the original SRS paper
at http://www.libsrs2.org/srs/srs.pdf, with sensible defaults derived from the
  canonical libsrs2 C implementation.
Example usage:
.. code-block:: python
srs = SRS('secret_key')
# Rewrites an email from alice@A.com to B.com
rewritten_addr = srs.forward('alice@A.com', 'B.com')
# Reverse it to get the address to bounce to.
bounce_addr = srs.reverse(rewritten_addr)
"""
# Regular expression matching SRS0 and SRS1 addresses.
_SRS0_OPAQUE = re.compile(r"""
SRS0 # Prefix tag
([-+=].+) # Opaque part w/ leading separator, generated by 1st hop
""", re.IGNORECASE | re.VERBOSE)
_SRS0 = re.compile(r"""
SRS0 # Prefix tag
[-+=]
([^=]+) # Hash
=
([^=]+) # Timestamp
=
([^=]+) # Envelope sender host
=
(.+) # Envelope sender local part
""", re.IGNORECASE | re.VERBOSE)
_SRS1 = re.compile(r"""
SRS1 # Prefix tag
[-+=]
([^=]+) # Hash
=
([^=]+) # 1st hop host
=
([-+=].+) # Opaque part w/ leading separator, generated by 1st hop
""", re.IGNORECASE | re.VERBOSE)
# 5-bit / base 32 alphabet for timestamp encoding as described in the spec.
  # Note that this encodes the timestamp integer directly in base 32; it is not
  # the RFC 4648 / RFC 3548 Base32 encoding of a byte stream, even though the
  # alphabet is the same.
_TS_ALPHABET = string.ascii_uppercase + '234567'
# Reverse lookup table for _TS_ALPHABET. Defined in __init__() as list
# comprehensions in class scope cannot access class scope.
_TS_REVERSE = {}
_SECONDS_IN_DAY = datetime.timedelta(days=1).total_seconds()
def __init__(
self,
secret,
prev_secrets=[],
validity_days=21,
hash_length=4):
# type: (Union[str, bytes], List[Union[str, bytes]], int, int)
"""Creates a new SRS configuration instance.
Args:
      secret (str|bytes): Cryptographic secret for creating / reversing
rewritten addresses.
prev_secrets (list(str|bytes)): Previously used secrets that are still
considered valid for reversing rewritten addresses.
validity_days (int): Number of days after which rewritten addresses cannot
be reversed.
hash_length (int): Length to truncate hash digest to.
"""
self._TS_REVERSE = {
self._TS_ALPHABET[i]: i
for i in range(len(self._TS_ALPHABET))
}
self._secret = self._to_bytes(secret)
self._prev_secrets = [self._to_bytes(secret) for secret in prev_secrets]
self._validity_days = validity_days
self._hash_length = hash_length
# Cached list of all valid timestamps. The first element is the current
# timestamp.
self._valid_ts_cache = None
# Used for testing timestamp checks.
self._time_fn = time.time
def forward(self, from_addr, alias_host):
# type: (str, str) -> str
"""Rewrites sender address `from_addr` to `alias_host`.
As described in the SRS specification, the algorithm is:
- If the envelope sender address (`from_addr`) is an SRS1 address
rewritten by 1stHop.com to SRS0 and later by nthHop.com to SRS1, rewrite
to a new SRS1 address such that bounces will go to us then 1stHop.com.
- If `from_addr` is an SRS0 address rewritten by 1stHop.com, rewrite to
an SRS1 address such that bounces will go to us then back to 1stHop.com.
- If `from_addr` is neither an SRS0 address nor an SRS1 address, rewrite
to an SRS0 address such that bounces will go to us then back to
`from_addr`.
Args:
from_addr (str): The original envelope sender address.
alias_host (str): The host to rewrite to (current host).
Returns:
str: The envelope sender address rewritten to `alias_host`.
Raises:
:obj:`srslib.InvalidAddressError`: `from_addr` is not a valid email
address.
"""
from_local_part, from_host = self._split_addr(from_addr)
# Case 1: Address is an SRS1 address. We are hop > 2, and we replace the
# hash with our own to generate a new SRS1 address that also bounces to the
# 1st hop.
m = self._SRS1.match(from_local_part)
if m:
return self.generate_srs1_address(m.group(2), m.group(3), alias_host)
# Case 2: Address is an SRS0 address. We are the 2nd hop, and we will return
# an SRS1 address that bounces to the 1st hop.
m = self._SRS0_OPAQUE.match(from_local_part)
if m:
return self.generate_srs1_address(from_host, m.group(1), alias_host)
# Case 3: We are the 1st hop. We will return an SRS0 address.
return self.generate_srs0_address(from_host, from_local_part, alias_host)
def reverse(self, srs_addr):
# type: (str) -> str
"""Reverses a rewritten address.
As described in the SRS specification, the algorithm is:
- If `srs_addr` is an SRS0 address rewritten by us, bounce to the
original envelope sender address.
- If `srs_addr` is an SRS1 address rewritten by 1stHop.com and then
us, bounce to the SRS0 address rewritten by 1stHop.com.
Args:
srs_addr (str): An SRS0 or SRS1 address.
Returns:
str: The address to bounce to.
Raises:
:obj:`srslib.InvalidAddressError`: `srs_addr` is not a valid email
address.
:obj:`srslib.InvalidHashError`: The hash string in `srs_addr` is invalid.
:obj:`srslib.InvalidTimestampError`: The timestamp string in `srs_addr` is
invalid or expired.
"""
from_local_part, from_host = self._split_addr(srs_addr)
# Case 1: Address is an SRS1 address. We were hop n >= 2 in the forwarding
# chain, and we will bounce back to hop 1.
m = self._SRS1.match(from_local_part)
if m:
self.check_hash(m.group(1), m.group(2) + m.group(3), srs_addr)
return 'SRS0%s@%s' % (m.group(3), m.group(2))
# Case 2: Address is an SRS0 address. We were the first hop in the
# forwarding chain, and we will bounce back to the original envelope sender.
m = self._SRS0.match(from_local_part)
if m:
self.check_hash(
m.group(1), m.group(2) + m.group(3) + m.group(4), srs_addr)
self.check_ts(m.group(2), srs_addr)
return '%s@%s' % (m.group(4), m.group(3))
raise InvalidAddressError('Unrecognized SRS address: "%s"' % srs_addr)
@classmethod
def is_srs_address(cls, addr, strict=True):
# type: (str, bool) -> bool
"""Checks if an address is a valid SRS address.
If strict is True, this function will only consider SRS0 addresses formatted
according to the Guarded scheme as valid. If strict is False, any address
with an SRS0 prefix and separator is considered valid.
Args:
addr (str): An email address, e.g. `foo@example.com`.
strict (bool): Whether to check SRS0 addresses in strict mode.
Raises:
:obj:`srslib.InvalidAddressError`: `addr` is not a valid email
address.
"""
local_part, host = cls._split_addr(addr)
srs0 = cls._SRS0 if strict else cls._SRS0_OPAQUE
    return bool(srs0.match(local_part) or cls._SRS1.match(local_part))
def generate_srs0_address(
self, orig_host, orig_local_part, alias_host):
# type: (str, str, str) -> str
"""Produces an SRS0 address.
Args:
orig_host (str): Host part of the original envelope sender address.
orig_local_part (str): Local part of the original envelope sender address.
alias_host (str): The host to rewrite to (current host).
Returns:
str: The rewritten SRS0 address.
"""
ts = self.generate_ts()
return 'SRS0=%s=%s=%s=%s@%s' % (
self.generate_hash(
ts + orig_host + orig_local_part,
self._secret,
self._hash_length),
ts,
orig_host,
orig_local_part,
alias_host)
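    # For example (illustrative placeholder values only): with
    # orig_host="example.com", orig_local_part="alice" and
    # alias_host="forwarder.com", the format string above yields an address of
    # the shape "SRS0=<hash>=<ts>=example.com=alice@forwarder.com".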
def generate_srs1_address(
self, first_hop_host, first_hop_local_part, alias_host):
# type: (str, str, str) -> str
"""Produces an SRS1 address.
Args:
first_hop_host (str): Address of the 1st hop (SRS0) host.
first_hop_local_part (str): Local part generated by 1st hop host
(w/o the "SRS0" prefix)
alias_host (str): The host to rewrite to (current host).
Returns:
str: The rewritten SRS1 address.
"""
return 'SRS1=%s=%s=%s@%s' % (
self.generate_hash(
first_hop_host + first_hop_local_part,
self._secret,
self._hash_length),
first_hop_host,
first_hop_local_part,
alias_host)
@classmethod
def _split_addr(cls, addr):
# type: (str) -> Tuple[str, str]
"""Splits an email address to (local_part, host)."""
try:
local_part, host = addr.split('@')
except ValueError:
      raise InvalidAddressError('Invalid email address: "%s"' % addr)
else:
return (local_part, host)
def generate_hash(self, s, secret, hash_length):
# type: (str, bytes, int) -> str
"""Produces a hash string for use in an SRS address.
As recommended in the specification, this function yields a base64-encoded
hash of the provided string in lower case using the HMAC-SHA1 algorithm, and
truncates it to hash_length characters.
Args:
s (str): Input string to hash.
secret (bytes): The cryptographic secret to use.
hash_length (int): Length to truncate the generated hash digest to.
Returns:
str: SRS hash string, truncated to `hash_length`.
"""
return (
base64.b64encode(
hmac.new(secret, s.lower().encode('utf-8'), hashlib.sha1).digest())
[:hash_length]
.decode('utf-8'))
def check_hash(self, h, s, addr):
# type: (str, str, str) -> None
"""Checks a hash (`h`) against an input string (`s`).
Following the canonical implementation (libsrs2), hashes are compared
case-insensively.
Args:
h (str): A hash string possibly generated by the algorithm described in
`generate_hash`.
s (str): Original hashed string.
addr (str): The full address being reversed.
Raises:
:obj:`srslib.InvalidHashError`: Hash is invalid.
"""
if not any(
h.lower() == self.generate_hash(s, secret, len(h)).lower()
for secret in [self._secret] + self._prev_secrets):
raise InvalidHashError('Invalid hash in SRS address: "%s"' % addr)
def generate_ts(self, t=None):
# type: (float) -> str
"""Produces a timestamp for use in an SRS0 address.
    Following the algorithm in the original paper, this function encodes the
    number of days since the UNIX epoch, modulo 2^10, in base 32 (two characters).
Args:
t (float): If not None, specifies the UNIX timestamp to use instead of the
current time.
"""
t = int((t or self._time_fn()) // self._SECONDS_IN_DAY)
return ''.join(
self._TS_ALPHABET[x]
for x in (
(t >> 5) & 0b11111,
t & 0b11111,
))
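    # Worked example (hypothetical day count): for t = 1234,
    # (1234 >> 5) & 0b11111 == 6  -> 'G', and 1234 & 0b11111 == 18 -> 'S',
    # so the encoded timestamp is 'GS'.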
def check_ts(self, ts, addr):
# type: (str, str) -> None
"""Checks an encoded timestamp string against current time.
Args:
ts (str): A timestamp possibly generated by the algorithm described in
`generate_ts`.
addr (str): The full address being reversed.
Raises:
:obj:`srslib.InvalidTimestampError`: timestamp is invalid.
"""
if (self._valid_ts_cache is None or
self._valid_ts_cache[0] != self.generate_ts()):
now = self._time_fn()
self._valid_ts_cache = [
self.generate_ts(now - i * self._SECONDS_IN_DAY)
for i in range(self._validity_days)
]
if ts.upper() not in self._valid_ts_cache:
raise InvalidTimestampError(
'Invalid timestamp in SRS address: "%s"' % addr)
def _to_bytes(self, secret):
# type: (Union[str, bytes]) -> bytes
"""Ensures that a client-provided secret is in bytes."""
if isinstance(secret, bytes):
return secret
elif isinstance(secret, str):
return secret.encode('utf-8')
else:
raise Error('SRS secret must be bytes or str, got %s' % type(secret))
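# Minimal round-trip sketch; the secret and addresses below are placeholders.
if __name__ == '__main__':
  srs = SRS('secret_key')
  # First hop: rewrite the original envelope sender to our domain.
  rewritten = srs.forward('alice@a.example', 'forwarder.example')
  # A bounce arrives for the rewritten address; reverse it to find the
  # address the bounce should be delivered to.
  assert srs.reverse(rewritten) == 'alice@a.example'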
|
jichu4n/srslib
|
srslib.py
|
Python
|
apache-2.0
| 14,041
|
from base64 import decodebytes
from subprocess import call
import paramiko, logging
import pysftp, sys, os, zipfile
print("Building")
call([os.path.join('.', 'node_modules', '.bin', 'ng'), 'build', '--prod'])
# Zip the contents of `src` into `<dst>.zip`, storing paths relative to `src`.
# (Note: this helper shadows the built-in zip(); it is only used in this script.)
def zip(src, dst):
zf = zipfile.ZipFile("%s.zip" % (dst), "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
zf.write(absname, arcname)
zf.close()
print("Zipping")
zip('dist', 'dist')
logging.basicConfig(filename="./log.txt", level=logging.DEBUG)
pathToEnv='src/environments/environment.prod.ts'
zipFile = "dist.zip"
keydata = None
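# The loop below does a simple line-based parse of environment.prod.ts and
# expects "key: 'value'," entries, e.g. (hypothetical values):
#   server: 'example.com',
#   user: 'deploy',
#   pw: 'secret',
#   key: 'AAAAB3NzaC1yc2E...',  # optional base64-encoded RSA host key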
with open(pathToEnv, 'r') as file:
for line in file.readlines():
line = line.strip()
tokens = line.split(": ")
if(len(tokens) == 2):
tokens[1] = tokens[1].replace("'","").replace(",",'')
if(tokens[0] == "server"):
srv = tokens[1]
elif(tokens[0] == "user"):
usr = tokens[1]
elif(tokens[0] == 'pw'):
pw = tokens[1]
elif(tokens[0] == 'key'):
keydata = bytes(tokens[1], 'utf-8')
# Pin the server's RSA host key if one was provided in environment.prod.ts;
# otherwise disable host-key checking entirely (vulnerable to MITM attacks).
cnopts = pysftp.CnOpts()
if keydata is not None:
key = paramiko.RSAKey(data=decodebytes(keydata))
cnopts.hostkeys.add(srv, 'ssh-rsa', key)
else:
cnopts.hostkeys = None
print("Uploading dist")
with pysftp.Connection(host=srv, username=usr,password=pw, cnopts=cnopts) as sftp:
with sftp.cd('/var/www'):
sftp.put("dist.zip")
print("Updating server")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
ssh.connect(hostname=srv, username=usr, password=pw)
stdin, stdout, stderr = ssh.exec_command("(cd /var/www; ./up.sh)", get_pty=True)
print("StdOut")
for line in iter(stdout.readline, ""):
print(line, end="")
print("StdErr")
for line in iter(stderr.readline, ""):
print(line, end="")
|
shamrickus/GloryOfNephilim
|
upload.py
|
Python
|
apache-2.0
| 2,024
|
#BEWARE: automatically generated code
#This code was generated by /generate/__main__.py
from opengl.gl.raw.bindings import *
VERSION_ES_CL_1_0 = 1
VERSION_ES_CM_1_1 = 1
VERSION_ES_CL_1_1 = 1
DEPTH_BUFFER_BIT = 0x00000100
STENCIL_BUFFER_BIT = 0x00000400
COLOR_BUFFER_BIT = 0x00004000
FALSE = 0
TRUE = 1
POINTS = 0x0000
LINES = 0x0001
LINE_LOOP = 0x0002
LINE_STRIP = 0x0003
TRIANGLES = 0x0004
TRIANGLE_STRIP = 0x0005
TRIANGLE_FAN = 0x0006
NEVER = 0x0200
LESS = 0x0201
EQUAL = 0x0202
LEQUAL = 0x0203
GREATER = 0x0204
NOTEQUAL = 0x0205
GEQUAL = 0x0206
ALWAYS = 0x0207
ZERO = 0
ONE = 1
SRC_COLOR = 0x0300
ONE_MINUS_SRC_COLOR = 0x0301
SRC_ALPHA = 0x0302
ONE_MINUS_SRC_ALPHA = 0x0303
DST_ALPHA = 0x0304
ONE_MINUS_DST_ALPHA = 0x0305
DST_COLOR = 0x0306
ONE_MINUS_DST_COLOR = 0x0307
SRC_ALPHA_SATURATE = 0x0308
CLIP_PLANE0 = 0x3000
CLIP_PLANE1 = 0x3001
CLIP_PLANE2 = 0x3002
CLIP_PLANE3 = 0x3003
CLIP_PLANE4 = 0x3004
CLIP_PLANE5 = 0x3005
FRONT = 0x0404
BACK = 0x0405
FRONT_AND_BACK = 0x0408
FOG = 0x0B60
LIGHTING = 0x0B50
TEXTURE_2D = 0x0DE1
CULL_FACE = 0x0B44
ALPHA_TEST = 0x0BC0
BLEND = 0x0BE2
COLOR_LOGIC_OP = 0x0BF2
DITHER = 0x0BD0
STENCIL_TEST = 0x0B90
DEPTH_TEST = 0x0B71
POINT_SMOOTH = 0x0B10
LINE_SMOOTH = 0x0B20
SCISSOR_TEST = 0x0C11
COLOR_MATERIAL = 0x0B57
NORMALIZE = 0x0BA1
RESCALE_NORMAL = 0x803A
VERTEX_ARRAY = 0x8074
NORMAL_ARRAY = 0x8075
COLOR_ARRAY = 0x8076
TEXTURE_COORD_ARRAY = 0x8078
MULTISAMPLE = 0x809D
SAMPLE_ALPHA_TO_COVERAGE = 0x809E
SAMPLE_ALPHA_TO_ONE = 0x809F
SAMPLE_COVERAGE = 0x80A0
NO_ERROR = 0
INVALID_ENUM = 0x0500
INVALID_VALUE = 0x0501
INVALID_OPERATION = 0x0502
STACK_OVERFLOW = 0x0503
STACK_UNDERFLOW = 0x0504
OUT_OF_MEMORY = 0x0505
EXP = 0x0800
EXP2 = 0x0801
FOG_DENSITY = 0x0B62
FOG_START = 0x0B63
FOG_END = 0x0B64
FOG_MODE = 0x0B65
FOG_COLOR = 0x0B66
CW = 0x0900
CCW = 0x0901
CURRENT_COLOR = 0x0B00
CURRENT_NORMAL = 0x0B02
CURRENT_TEXTURE_COORDS = 0x0B03
POINT_SIZE = 0x0B11
POINT_SIZE_MIN = 0x8126
POINT_SIZE_MAX = 0x8127
POINT_FADE_THRESHOLD_SIZE = 0x8128
POINT_DISTANCE_ATTENUATION = 0x8129
SMOOTH_POINT_SIZE_RANGE = 0x0B12
LINE_WIDTH = 0x0B21
SMOOTH_LINE_WIDTH_RANGE = 0x0B22
ALIASED_POINT_SIZE_RANGE = 0x846D
ALIASED_LINE_WIDTH_RANGE = 0x846E
CULL_FACE_MODE = 0x0B45
FRONT_FACE = 0x0B46
SHADE_MODEL = 0x0B54
DEPTH_RANGE = 0x0B70
DEPTH_WRITEMASK = 0x0B72
DEPTH_CLEAR_VALUE = 0x0B73
DEPTH_FUNC = 0x0B74
STENCIL_CLEAR_VALUE = 0x0B91
STENCIL_FUNC = 0x0B92
STENCIL_VALUE_MASK = 0x0B93
STENCIL_FAIL = 0x0B94
STENCIL_PASS_DEPTH_FAIL = 0x0B95
STENCIL_PASS_DEPTH_PASS = 0x0B96
STENCIL_REF = 0x0B97
STENCIL_WRITEMASK = 0x0B98
MATRIX_MODE = 0x0BA0
VIEWPORT = 0x0BA2
MODELVIEW_STACK_DEPTH = 0x0BA3
PROJECTION_STACK_DEPTH = 0x0BA4
TEXTURE_STACK_DEPTH = 0x0BA5
MODELVIEW_MATRIX = 0x0BA6
PROJECTION_MATRIX = 0x0BA7
TEXTURE_MATRIX = 0x0BA8
ALPHA_TEST_FUNC = 0x0BC1
ALPHA_TEST_REF = 0x0BC2
BLEND_DST = 0x0BE0
BLEND_SRC = 0x0BE1
LOGIC_OP_MODE = 0x0BF0
SCISSOR_BOX = 0x0C10
COLOR_CLEAR_VALUE = 0x0C22
COLOR_WRITEMASK = 0x0C23
MAX_LIGHTS = 0x0D31
MAX_CLIP_PLANES = 0x0D32
MAX_TEXTURE_SIZE = 0x0D33
MAX_MODELVIEW_STACK_DEPTH = 0x0D36
MAX_PROJECTION_STACK_DEPTH = 0x0D38
MAX_TEXTURE_STACK_DEPTH = 0x0D39
MAX_VIEWPORT_DIMS = 0x0D3A
MAX_TEXTURE_UNITS = 0x84E2
SUBPIXEL_BITS = 0x0D50
RED_BITS = 0x0D52
GREEN_BITS = 0x0D53
BLUE_BITS = 0x0D54
ALPHA_BITS = 0x0D55
DEPTH_BITS = 0x0D56
STENCIL_BITS = 0x0D57
POLYGON_OFFSET_UNITS = 0x2A00
POLYGON_OFFSET_FILL = 0x8037
POLYGON_OFFSET_FACTOR = 0x8038
TEXTURE_BINDING_2D = 0x8069
VERTEX_ARRAY_SIZE = 0x807A
VERTEX_ARRAY_TYPE = 0x807B
VERTEX_ARRAY_STRIDE = 0x807C
NORMAL_ARRAY_TYPE = 0x807E
NORMAL_ARRAY_STRIDE = 0x807F
COLOR_ARRAY_SIZE = 0x8081
COLOR_ARRAY_TYPE = 0x8082
COLOR_ARRAY_STRIDE = 0x8083
TEXTURE_COORD_ARRAY_SIZE = 0x8088
TEXTURE_COORD_ARRAY_TYPE = 0x8089
TEXTURE_COORD_ARRAY_STRIDE = 0x808A
VERTEX_ARRAY_POINTER = 0x808E
NORMAL_ARRAY_POINTER = 0x808F
COLOR_ARRAY_POINTER = 0x8090
TEXTURE_COORD_ARRAY_POINTER = 0x8092
SAMPLE_BUFFERS = 0x80A8
SAMPLES = 0x80A9
SAMPLE_COVERAGE_VALUE = 0x80AA
SAMPLE_COVERAGE_INVERT = 0x80AB
NUM_COMPRESSED_TEXTURE_FORMATS = 0x86A2
COMPRESSED_TEXTURE_FORMATS = 0x86A3
DONT_CARE = 0x1100
FASTEST = 0x1101
NICEST = 0x1102
PERSPECTIVE_CORRECTION_HINT = 0x0C50
POINT_SMOOTH_HINT = 0x0C51
LINE_SMOOTH_HINT = 0x0C52
FOG_HINT = 0x0C54
GENERATE_MIPMAP_HINT = 0x8192
LIGHT_MODEL_AMBIENT = 0x0B53
LIGHT_MODEL_TWO_SIDE = 0x0B52
AMBIENT = 0x1200
DIFFUSE = 0x1201
SPECULAR = 0x1202
POSITION = 0x1203
SPOT_DIRECTION = 0x1204
SPOT_EXPONENT = 0x1205
SPOT_CUTOFF = 0x1206
CONSTANT_ATTENUATION = 0x1207
LINEAR_ATTENUATION = 0x1208
QUADRATIC_ATTENUATION = 0x1209
BYTE = 0x1400
UNSIGNED_BYTE = 0x1401
SHORT = 0x1402
UNSIGNED_SHORT = 0x1403
FLOAT = 0x1406
FIXED = 0x140C
CLEAR = 0x1500
AND = 0x1501
AND_REVERSE = 0x1502
COPY = 0x1503
AND_INVERTED = 0x1504
NOOP = 0x1505
XOR = 0x1506
OR = 0x1507
NOR = 0x1508
EQUIV = 0x1509
INVERT = 0x150A
OR_REVERSE = 0x150B
COPY_INVERTED = 0x150C
OR_INVERTED = 0x150D
NAND = 0x150E
SET = 0x150F
EMISSION = 0x1600
SHININESS = 0x1601
AMBIENT_AND_DIFFUSE = 0x1602
MODELVIEW = 0x1700
PROJECTION = 0x1701
TEXTURE = 0x1702
ALPHA = 0x1906
RGB = 0x1907
RGBA = 0x1908
LUMINANCE = 0x1909
LUMINANCE_ALPHA = 0x190A
UNPACK_ALIGNMENT = 0x0CF5
PACK_ALIGNMENT = 0x0D05
UNSIGNED_SHORT_4_4_4_4 = 0x8033
UNSIGNED_SHORT_5_5_5_1 = 0x8034
UNSIGNED_SHORT_5_6_5 = 0x8363
FLAT = 0x1D00
SMOOTH = 0x1D01
KEEP = 0x1E00
REPLACE = 0x1E01
INCR = 0x1E02
DECR = 0x1E03
VENDOR = 0x1F00
RENDERER = 0x1F01
VERSION = 0x1F02
EXTENSIONS = 0x1F03
MODULATE = 0x2100
DECAL = 0x2101
ADD = 0x0104
TEXTURE_ENV_MODE = 0x2200
TEXTURE_ENV_COLOR = 0x2201
TEXTURE_ENV = 0x2300
NEAREST = 0x2600
LINEAR = 0x2601
NEAREST_MIPMAP_NEAREST = 0x2700
LINEAR_MIPMAP_NEAREST = 0x2701
NEAREST_MIPMAP_LINEAR = 0x2702
LINEAR_MIPMAP_LINEAR = 0x2703
TEXTURE_MAG_FILTER = 0x2800
TEXTURE_MIN_FILTER = 0x2801
TEXTURE_WRAP_S = 0x2802
TEXTURE_WRAP_T = 0x2803
GENERATE_MIPMAP = 0x8191
TEXTURE0 = 0x84C0
TEXTURE1 = 0x84C1
TEXTURE2 = 0x84C2
TEXTURE3 = 0x84C3
TEXTURE4 = 0x84C4
TEXTURE5 = 0x84C5
TEXTURE6 = 0x84C6
TEXTURE7 = 0x84C7
TEXTURE8 = 0x84C8
TEXTURE9 = 0x84C9
TEXTURE10 = 0x84CA
TEXTURE11 = 0x84CB
TEXTURE12 = 0x84CC
TEXTURE13 = 0x84CD
TEXTURE14 = 0x84CE
TEXTURE15 = 0x84CF
TEXTURE16 = 0x84D0
TEXTURE17 = 0x84D1
TEXTURE18 = 0x84D2
TEXTURE19 = 0x84D3
TEXTURE20 = 0x84D4
TEXTURE21 = 0x84D5
TEXTURE22 = 0x84D6
TEXTURE23 = 0x84D7
TEXTURE24 = 0x84D8
TEXTURE25 = 0x84D9
TEXTURE26 = 0x84DA
TEXTURE27 = 0x84DB
TEXTURE28 = 0x84DC
TEXTURE29 = 0x84DD
TEXTURE30 = 0x84DE
TEXTURE31 = 0x84DF
ACTIVE_TEXTURE = 0x84E0
CLIENT_ACTIVE_TEXTURE = 0x84E1
REPEAT = 0x2901
CLAMP_TO_EDGE = 0x812F
LIGHT0 = 0x4000
LIGHT1 = 0x4001
LIGHT2 = 0x4002
LIGHT3 = 0x4003
LIGHT4 = 0x4004
LIGHT5 = 0x4005
LIGHT6 = 0x4006
LIGHT7 = 0x4007
ARRAY_BUFFER = 0x8892
ELEMENT_ARRAY_BUFFER = 0x8893
ARRAY_BUFFER_BINDING = 0x8894
ELEMENT_ARRAY_BUFFER_BINDING = 0x8895
VERTEX_ARRAY_BUFFER_BINDING = 0x8896
NORMAL_ARRAY_BUFFER_BINDING = 0x8897
COLOR_ARRAY_BUFFER_BINDING = 0x8898
TEXTURE_COORD_ARRAY_BUFFER_BINDING = 0x889A
STATIC_DRAW = 0x88E4
DYNAMIC_DRAW = 0x88E8
BUFFER_SIZE = 0x8764
BUFFER_USAGE = 0x8765
SUBTRACT = 0x84E7
COMBINE = 0x8570
COMBINE_RGB = 0x8571
COMBINE_ALPHA = 0x8572
RGB_SCALE = 0x8573
ADD_SIGNED = 0x8574
INTERPOLATE = 0x8575
CONSTANT = 0x8576
PRIMARY_COLOR = 0x8577
PREVIOUS = 0x8578
OPERAND0_RGB = 0x8590
OPERAND1_RGB = 0x8591
OPERAND2_RGB = 0x8592
OPERAND0_ALPHA = 0x8598
OPERAND1_ALPHA = 0x8599
OPERAND2_ALPHA = 0x859A
ALPHA_SCALE = 0x0D1C
SRC0_RGB = 0x8580
SRC1_RGB = 0x8581
SRC2_RGB = 0x8582
SRC0_ALPHA = 0x8588
SRC1_ALPHA = 0x8589
SRC2_ALPHA = 0x858A
DOT3_RGB = 0x86AE
DOT3_RGBA = 0x86AF
@accepts(t.enum, t.float)
@returns(t.void)
@binds(dll)
def alpha_func(func, ref):
'''
specify the alpha test function.
gl.alpha_func operates on all pixel write operations, including those
resulting from the scan conversion of points, lines, polygons, and bitmaps,
and from pixel draw and copy operations. gl.alpha_func does not affect
screen clear operations.
Args:
func: the alpha comparison function.
ref: the reference value that incoming alpha values are compared to.
'''
@accepts(t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def clear_color(red, green, blue, alpha):
'''
specify clear values for the color buffers.
gl.clear_color specifies the red, green, blue, and alpha values used by
gl.clear to clear the color buffers. Values specified by gl.clear_color are
    clamped to the range [0, 1].
Args:
red: the red, green, blue, and alpha values used when the color buffers
are cleared.
green: the red, green, blue, and alpha values used when the color
buffers are cleared.
blue: the red, green, blue, and alpha values used when the color buffers
are cleared.
alpha: the red, green, blue, and alpha values used when the color
buffers are cleared.
'''
@accepts(t.float)
@returns(t.void)
@binds(dll)
def clear_depthf(d):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def clip_planef(p, eqn):
pass
@accepts(t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def color4f(red, green, blue, alpha):
pass
@accepts(t.float, t.float)
@returns(t.void)
@binds(dll)
def depth_rangef(n, f):
pass
@accepts(t.enum, t.float)
@returns(t.void)
@binds(dll)
def fogf(pname, param):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def fogfv(pname, params):
pass
@accepts(t.float, t.float, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def frustumf(l, r, b, t, n, f):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_clip_planef(plane, equation):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_floatv(pname, data):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_lightfv(light, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_materialfv(face, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_tex_envfv(target, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_tex_parameterfv(target, pname, params):
pass
@accepts(t.enum, t.float)
@returns(t.void)
@binds(dll)
def light_modelf(pname, param):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def light_modelfv(pname, params):
pass
@accepts(t.enum, t.enum, t.float)
@returns(t.void)
@binds(dll)
def lightf(light, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def lightfv(light, pname, params):
pass
@accepts(t.float)
@returns(t.void)
@binds(dll)
def line_width(width):
'''
specify the width of rasterized lines.
gl.line_width specifies the rasterized width of both aliased and antialiased
lines. Using a line width other than 1 has different effects, depending on
whether line antialiasing is enabled. To enable and disable line
antialiasing, call gl.enable and gl.disable with argument gl.LINE_SMOOTH.
Line antialiasing is initially disabled.
Args:
width: the width of rasterized lines.
'''
@accepts(POINTER(t.float))
@returns(t.void)
@binds(dll)
def load_matrixf(m):
pass
@accepts(t.enum, t.enum, t.float)
@returns(t.void)
@binds(dll)
def materialf(face, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def materialfv(face, pname, params):
pass
@accepts(POINTER(t.float))
@returns(t.void)
@binds(dll)
def mult_matrixf(m):
pass
@accepts(t.enum, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def multi_tex_coord4f(target, s, t, r, q):
pass
@accepts(t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def normal3f(nx, ny, nz):
pass
@accepts(t.float, t.float, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def orthof(l, r, b, t, n, f):
pass
@accepts(t.enum, t.float)
@returns(t.void)
@binds(dll)
def point_parameterf(pname, param):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def point_parameterfv(pname, params):
pass
@accepts(t.float)
@returns(t.void)
@binds(dll)
def point_size(size):
'''
specify the diameter of rasterized points.
gl.point_size specifies the rasterized diameter of points. If point size
mode is disabled, this value will be used to rasterize points. Otherwise,
the value written to the shading language built-in variable gl_PointSize
will be used.
Args:
size: the diameter of rasterized points.
'''
@accepts(t.float, t.float)
@returns(t.void)
@binds(dll)
def polygon_offset(factor, units):
'''
set the scale and units used to calculate depth values.
gl.polygon_offset is useful for rendering hidden-line images, for applying
decals to surfaces, and for rendering solids with highlighted edges.
Args:
factor: a scale factor that is used to create a variable depth offset
for each polygon.
units: is multiplied by an implementation-specific value to create a
constant depth offset.
'''
@accepts(t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def rotatef(angle, x, y, z):
pass
@accepts(t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def scalef(x, y, z):
pass
@accepts(t.enum, t.enum, t.float)
@returns(t.void)
@binds(dll)
def tex_envf(target, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def tex_envfv(target, pname, params):
pass
@accepts(t.enum, t.enum, t.float)
@returns(t.void)
@binds(dll)
def tex_parameterf(target, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def tex_parameterfv(target, pname, params):
pass
@accepts(t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def translatef(x, y, z):
pass
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def active_texture(texture):
'''
select active texture unit.
gl.active_texture selects which texture unit subsequent texture state calls
will affect. The number of texture units an implementation supports is
implementation dependent, but must be at least 80.
Args:
texture: which texture unit to make active.
'''
@accepts(t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def alpha_funcx(func, ref):
pass
@accepts(t.enum, t.uint)
@returns(t.void)
@binds(dll)
def bind_buffer(target, buffer):
'''
bind a named buffer object.
gl.bind_buffer binds a buffer object to the specified buffer binding point.
Calling gl.bind_buffer with target set to one of the accepted symbolic
constants and buffer set to the name of a buffer object binds that buffer
object name to the target. If no buffer object with name buffer exists, one
is created with that name. When a buffer object is bound to a target, the
previous binding for that target is automatically broken.
Args:
target: the target to which the buffer object is bound, which must be
            one of the accepted buffer binding targets.
buffer: the name of a buffer object.
'''
@accepts(t.enum, t.uint)
@returns(t.void)
@binds(dll)
def bind_texture(target, texture):
'''
bind a named texture to a texturing target.
gl.bind_texture lets you create or use a named texture. Calling
gl.bind_texture with target set to gl.TEXTURE_1D, gl.TEXTURE_2D,
gl.TEXTURE_3D, gl.TEXTURE_1D_ARRAY, gl.TEXTURE_2D_ARRAY,
gl.TEXTURE_RECTANGLE, gl.TEXTURE_CUBE_MAP, gl.TEXTURE_CUBE_MAP_ARRAY,
gl.TEXTURE_BUFFER, gl.TEXTURE_2D_MULTISAMPLE or
gl.TEXTURE_2D_MULTISAMPLE_ARRAY and texture set to the name of the new
texture binds the texture name to the target. When a texture is bound to a
target, the previous binding for that target is automatically broken.
Args:
target: the target to which the texture is bound.
texture: the name of a texture.
'''
@accepts(t.enum, t.enum)
@returns(t.void)
@binds(dll)
def blend_func(sfactor, dfactor):
'''
specify pixel arithmetic.
gl.blend_func defines the operation of blending for all draw buffers when it
is enabled. gl.blend_funci defines the operation of blending for a single
draw buffer specified by buf when enabled for that draw buffer. sfactor
specifies which method is used to scale the source color components. dfactor
specifies which method is used to scale the destination color components.
Args:
sfactor: how the red, green, blue, and alpha source blending factors are
computed.
dfactor: how the red, green, blue, and alpha destination blending
factors are computed.
'''
@accepts(t.enum, t.sizeiptr, t.void, t.enum)
@returns(t.void)
@binds(dll)
def buffer_data(target, size, data, usage):
'''
creates and initializes a buffer object's data store.
gl.buffer_data and gl.named_buffer_data create a new data store for a buffer
object. In case of gl.buffer_data, the buffer object currently bound to
target is used. For gl.named_buffer_data, a buffer object associated with ID
specified by the caller in buffer will be used instead.
Args:
target: the target to which the buffer object is bound for glbufferdata,
            which must be one of the accepted buffer binding targets.
size: the size in bytes of the buffer object's new data store.
data: a pointer to data that will be copied into the data store for
initialization, or null if no data is to be copied.
usage: the expected usage pattern of the data store.
'''
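# Illustrative sketch of a typical buffer setup with the bindings above. This is
# an assumption-laden sketch: it presumes a current GL ES context and that the
# bindings module exposes the usual ctypes plumbing (t.uint, byref, sizeof);
# `vertex_data` stands in for a ctypes float array.
#
#   buf = t.uint(0)
#   gen_buffers(1, byref(buf))
#   bind_buffer(ARRAY_BUFFER, buf)
#   buffer_data(ARRAY_BUFFER, sizeof(vertex_data), vertex_data, STATIC_DRAW)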
@accepts(t.enum, t.intptr, t.sizeiptr, t.void)
@returns(t.void)
@binds(dll)
def buffer_sub_data(target, offset, size, data):
'''
updates a subset of a buffer object's data store.
gl.buffer_sub_data and gl.named_buffer_sub_data redefine some or all of the
data store for the specified buffer object. Data starting at byte offset
offset and extending for size bytes is copied to the data store from the
memory pointed to by data. offset and size must define a range lying
entirely within the buffer object's data store.
Args:
target: the target to which the buffer object is bound for
            glbuffersubdata, which must be one of the accepted buffer binding
            targets.
offset: the offset into the buffer object's data store where data
replacement will begin, measured in bytes.
size: the size in bytes of the data store region being replaced.
data: a pointer to the new data that will be copied into the data store.
'''
@accepts(t.bitfield)
@returns(t.void)
@binds(dll)
def clear(mask):
'''
clear buffers to preset values.
gl.clear sets the bitplane area of the window to values previously selected
by gl.clear_color, gl.clear_depth, and gl.clear_stencil. Multiple color
buffers can be cleared simultaneously by selecting more than one buffer at a
time using gl.draw_buffer.
Args:
mask: bitwise or of masks that indicate the buffers to be cleared.
'''
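# Illustrative sketch (assumes a current GL ES context is bound elsewhere):
#
#   clear_color(0.0, 0.0, 0.0, 1.0)              # opaque black clear value
#   clear(COLOR_BUFFER_BIT | DEPTH_BUFFER_BIT)   # clear color and depth buffers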
@accepts(t.fixed, t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def clear_colorx(red, green, blue, alpha):
pass
@accepts(t.fixed)
@returns(t.void)
@binds(dll)
def clear_depthx(depth):
pass
@accepts(t.int)
@returns(t.void)
@binds(dll)
def clear_stencil(s):
'''
specify the clear value for the stencil buffer.
gl.clear_stencil specifies the index used by gl.clear to clear the stencil
    buffer. s is masked with 2^m - 1, where m is the number of bits in the
    stencil buffer.
Args:
s: the index used when the stencil buffer is cleared.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def client_active_texture(texture):
'''
select active texture unit.
gl.client_active_texture selects the vertex array client state parameters to
be modified by gl.tex_coord_pointer, and enabled or disabled with
gl.enable_client_state or gl.disable_client_state, respectively, when called
with a parameter of gl.TEXTURE_COORD_ARRAY.
Args:
texture: which texture unit to make active.
'''
@accepts(t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def clip_planex(plane, equation):
pass
@accepts(t.ubyte, t.ubyte, t.ubyte, t.ubyte)
@returns(t.void)
@binds(dll)
def color4ub(red, green, blue, alpha):
pass
@accepts(t.fixed, t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def color4x(red, green, blue, alpha):
pass
@accepts(t.boolean, t.boolean, t.boolean, t.boolean)
@returns(t.void)
@binds(dll)
def color_mask(red, green, blue, alpha):
'''
enable and disable writing of frame buffer color components.
gl.color_mask and gl.color_maski specify whether the individual color
components in the frame buffer can or cannot be written. gl.color_maski sets
the mask for a specific draw buffer, whereas gl.color_mask sets the mask for
all draw buffers. If red is gl.FALSE, for example, no change is made to the
red component of any pixel in any of the color buffers, regardless of the
drawing operation attempted.
Args:
red: whether red, green, blue, and alpha are to be written into the
frame buffer.
green: whether red, green, blue, and alpha are to be written into the
frame buffer.
blue: whether red, green, blue, and alpha are to be written into the
frame buffer.
alpha: whether red, green, blue, and alpha are to be written into the
frame buffer.
'''
@accepts(t.int, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def color_pointer(size, type, stride, pointer):
'''
define an array of colors.
gl.color_pointer specifies the location and data format of an array of color
components to use when rendering. size specifies the number of components
per color, and must be 3 or 4. type specifies the data type of each color
component, and stride specifies the byte stride from one color to the next,
allowing vertices and attributes to be packed into a single array or stored
in separate arrays. (Single-array storage may be more efficient on some
    implementations; see gl.interleaved_arrays.)
Args:
size: the number of components per color.
type: the data type of each color component in the array.
stride: the byte offset between consecutive colors.
pointer: a pointer to the first component of the first color element in
the array.
'''
@accepts(t.enum, t.int, t.enum, t.sizei, t.sizei, t.int, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_image2_d(target, level, internalformat, width, height, border, imagesize, data):
'''
specify a two-dimensional texture image in a compressed format.
Args:
target: the target texture.
level: the level-of-detail number.
internalformat: the format of the compressed image data stored at
address data.
width: the width of the texture image.
height: the height of the texture image.
border: this value must be 0.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.int, t.int, t.sizei, t.sizei, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_sub_image2_d(target, level, xoffset, yoffset, width, height, format, imagesize, data):
'''
specify a two-dimensional texture subimage in a compressed format.
Args:
target: the target to which the texture is bound for
glcompressedtexsubimage2d function.
level: the level-of-detail number.
xoffset: a texel offset in the x direction within the texture array.
yoffset: a texel offset in the y direction within the texture array.
width: the width of the texture subimage.
height: the height of the texture subimage.
format: the format of the compressed image data stored at address data.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.enum, t.int, t.int, t.sizei, t.sizei, t.int)
@returns(t.void)
@binds(dll)
def copy_tex_image2_d(target, level, internalformat, x, y, width, height, border):
'''
copy pixels into a 2D texture image.
Args:
target: the target texture.
level: the level-of-detail number.
internalformat: the internal format of the texture.
x: the window coordinates of the lower left corner of the rectangular
region of pixels to be copied.
y: the window coordinates of the lower left corner of the rectangular
region of pixels to be copied.
width: the width of the texture image.
height: the height of the texture image.
border: must be 0.
'''
@accepts(t.enum, t.int, t.int, t.int, t.int, t.int, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def copy_tex_sub_image2_d(target, level, xoffset, yoffset, x, y, width, height):
'''
copy a two-dimensional texture subimage.
Args:
target: the target to which the texture object is bound for
glcopytexsubimage2d function.
level: the level-of-detail number.
xoffset: a texel offset in the x direction within the texture array.
yoffset: a texel offset in the y direction within the texture array.
x: the window coordinates of the lower left corner of the rectangular
region of pixels to be copied.
y: the window coordinates of the lower left corner of the rectangular
region of pixels to be copied.
width: the width of the texture subimage.
height: the height of the texture subimage.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def cull_face(mode):
'''
specify whether front- or back-facing facets can be culled.
gl.cull_face specifies whether front- or back-facing facets are culled when
facet culling is enabled. Facet culling is initially disabled. To enable and
disable facet culling, call the gl.enable and gl.disable commands with the
argument gl.CULL_FACE. Facets include triangles, quadrilaterals, polygons,
and rectangles.
Args:
mode: whether front- or back-facing facets are candidates for culling.
'''
@accepts(t.sizei, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def delete_buffers(n, buffers):
'''
delete named buffer objects.
gl.delete_buffers deletes n buffer objects named by the elements of the
array buffers. After a buffer object is deleted, it has no contents, and its
name is free for reuse. If a buffer object that is currently bound is
deleted, the binding reverts to 0.
Args:
n: the number of buffer objects to be deleted.
buffers: an array of buffer objects to be deleted.
'''
@accepts(t.sizei, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def delete_textures(n, textures):
'''
delete named textures.
gl.delete_textures deletes n textures named by the elements of the array
textures. After a texture is deleted, it has no contents or dimensionality,
and its name is free for reuse. If a texture that is currently bound is
deleted, the binding reverts to 0.
Args:
n: the number of textures to be deleted.
textures: an array of textures to be deleted.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def depth_func(func):
'''
specify the value used for depth buffer comparisons.
gl.depth_func specifies the function used to compare each incoming pixel
depth value with the depth value present in the depth buffer. The comparison
is performed only if depth testing is enabled.
Args:
func: the depth comparison function.
'''
@accepts(t.boolean)
@returns(t.void)
@binds(dll)
def depth_mask(flag):
'''
enable or disable writing into the depth buffer.
gl.depth_mask specifies whether the depth buffer is enabled for writing. If
flag is gl.FALSE, depth buffer writing is disabled. Otherwise, it is
enabled. Initially, depth buffer writing is enabled.
Args:
flag: whether the depth buffer is enabled for writing.
'''
@accepts(t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def depth_rangex(n, f):
pass
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def disable(cap):
pass
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def disable_client_state(array):
pass
@accepts(t.enum, t.int, t.sizei)
@returns(t.void)
@binds(dll)
def draw_arrays(mode, first, count):
'''
render primitives from array data.
gl.draw_arrays specifies multiple geometric primitives with very few
subroutine calls. Instead of calling a GL procedure to pass each individual
vertex, normal, texture coordinate, edge flag, or color, you can prespecify
separate arrays of vertices, normals, and colors and use them to construct a
sequence of primitives with a single call to gl.draw_arrays.
Args:
mode: what kind of primitives to render.
first: the starting index in the enabled arrays.
count: the number of indices to be rendered.
'''
@accepts(t.enum, t.sizei, t.enum, t.void)
@returns(t.void)
@binds(dll)
def draw_elements(mode, count, type, indices):
'''
render primitives from array data.
gl.draw_elements specifies multiple geometric primitives with very few
subroutine calls. Instead of calling a GL function to pass each individual
vertex, normal, texture coordinate, edge flag, or color, you can prespecify
separate arrays of vertices, normals, and so on, and use them to construct a
sequence of primitives with a single call to gl.draw_elements.
Args:
mode: what kind of primitives to render.
count: the number of elements to be rendered.
type: the type of the values in indices.
indices: a pointer to the location where the indices are stored.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def enable(cap):
'''
enable or disable server-side GL capabilities.
gl.enable and gl.disable enable and disable various capabilities. Use
gl.is_enabled or gl.get to determine the current setting of any capability.
The initial value for each capability with the exception of gl.DITHER and
gl.MULTISAMPLE is gl.FALSE. The initial value for gl.DITHER and
gl.MULTISAMPLE is gl.TRUE.
Args:
cap: a symbolic constant indicating a gl capability.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def enable_client_state(array):
'''
enable or disable client-side capability.
Args:
array: the capability to enable.
'''
@accepts()
@returns(t.void)
@binds(dll)
def finish():
'''
block until all GL execution is complete.
gl.finish requires a round trip to the server.
'''
@accepts()
@returns(t.void)
@binds(dll)
def flush():
'''
force execution of GL commands in finite time.
gl.flush can return at any time. It does not wait until the execution of all
previously issued GL commands is complete.
'''
@accepts(t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def fogx(pname, param):
pass
@accepts(t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def fogxv(pname, param):
pass
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def front_face(mode):
'''
define front- and back-facing polygons.
Args:
mode: the orientation of front-facing polygons.
'''
@accepts(t.fixed, t.fixed, t.fixed, t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def frustumx(l, r, b, t, n, f):
pass
@accepts(t.enum, POINTER(t.boolean))
@returns(t.void)
@binds(dll)
def get_booleanv(pname, data):
pass
@accepts(t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_buffer_parameteriv(target, pname, params):
'''
return parameters of a buffer object.
gl.get_buffer_parameteriv returns in data a selected parameter of the buffer
object specified by target.
Args:
target: the target buffer object.
pname: the symbolic name of a buffer object parameter.
params: returns the requested parameter.
'''
@accepts(t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def get_clip_planex(plane, equation):
pass
@accepts(t.sizei, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def gen_buffers(n, buffers):
'''
generate buffer object names.
gl.gen_buffers returns n buffer object names in buffers. There is no
guarantee that the names form a contiguous set of integers; however, it is
guaranteed that none of the returned names was in use immediately before the
call to gl.gen_buffers.
Args:
n: the number of buffer object names to be generated.
buffers: an array in which the generated buffer object names are stored.
'''
@accepts(t.sizei, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def gen_textures(n, textures):
'''
generate texture names.
gl.gen_textures returns n texture names in textures. There is no guarantee
that the names form a contiguous set of integers; however, it is guaranteed
that none of the returned names was in use immediately before the call to
gl.gen_textures.
Args:
n: the number of texture names to be generated.
textures: an array in which the generated texture names are stored.
'''
@accepts()
@returns(t.enum)
@binds(dll)
def get_error():
'''
return error information.
'''
@accepts(t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def get_fixedv(pname, params):
pass
@accepts(t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_integerv(pname, data):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def get_lightxv(light, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def get_materialxv(face, pname, params):
pass
@accepts(t.enum, t.void)
@returns(t.void)
@binds(dll)
def get_pointerv(pname, params):
'''
return the address of the specified pointer.
gl.get_pointerv returns pointer information. pname indicates the pointer to
be returned, and params is a pointer to a location in which to place the
returned data.
Args:
pname: the pointer to be returned.
params: returns the pointer value specified by pname.
'''
@accepts(t.enum)
@returns(POINTER(t.ubyte))
@binds(dll)
def get_string(name):
'''
return a string describing the current GL connection.
gl.get_string returns a pointer to a static string describing some aspect of
the current GL connection.
Args:
name: a symbolic constant, one of gl_vendor, gl_renderer, gl_version, or
gl_shading_language_version.
'''
@accepts(t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_tex_enviv(target, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def get_tex_envxv(target, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_tex_parameteriv(target, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def get_tex_parameterxv(target, pname, params):
pass
@accepts(t.enum, t.enum)
@returns(t.void)
@binds(dll)
def hint(target, mode):
'''
specify implementation-specific hints.
Args:
target: a symbolic constant indicating the behavior to be controlled.
mode: a symbolic constant indicating the desired behavior.
'''
@accepts(t.uint)
@returns(t.boolean)
@binds(dll)
def is_buffer(buffer):
'''
determine if a name corresponds to a buffer object.
gl.is_buffer returns gl.TRUE if buffer is currently the name of a buffer
object. If buffer is zero, or is a non-zero value that is not currently the
name of a buffer object, or if an error occurs, gl.is_buffer returns
gl.FALSE.
Args:
buffer: a value that may be the name of a buffer object.
'''
@accepts(t.enum)
@returns(t.boolean)
@binds(dll)
def is_enabled(cap):
'''
test whether a capability is enabled.
gl.is_enabled returns gl.TRUE if cap is an enabled capability and returns
    gl.FALSE otherwise.
Args:
cap: a symbolic constant indicating a gl capability.
'''
@accepts(t.uint)
@returns(t.boolean)
@binds(dll)
def is_texture(texture):
'''
determine if a name corresponds to a texture.
gl.is_texture returns gl.TRUE if texture is currently the name of a texture.
If texture is zero, or is a non-zero value that is not currently the name of
a texture, or if an error occurs, gl.is_texture returns gl.FALSE.
Args:
texture: a value that may be the name of a texture.
'''
@accepts(t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def light_modelx(pname, param):
pass
@accepts(t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def light_modelxv(pname, param):
pass
@accepts(t.enum, t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def lightx(light, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def lightxv(light, pname, params):
pass
@accepts(t.fixed)
@returns(t.void)
@binds(dll)
def line_widthx(width):
pass
@accepts()
@returns(t.void)
@binds(dll)
def load_identity():
'''
replace the current matrix with the identity matrix.
'''
@accepts(POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def load_matrixx(m):
pass
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def logic_op(opcode):
'''
specify a logical pixel operation for rendering.
gl.logic_op specifies a logical operation that, when enabled, is applied
between the incoming RGBA color and the RGBA color at the corresponding
location in the frame buffer. To enable or disable the logical operation,
call gl.enable and gl.disable using the symbolic constant gl.COLOR_LOGIC_OP.
The initial value is disabled.
Args:
opcode: a symbolic constant that selects a logical operation.
'''
@accepts(t.enum, t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def materialx(face, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def materialxv(face, pname, param):
pass
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def matrix_mode(mode):
'''
specify which matrix is the current matrix.
gl.matrix_mode sets the current matrix mode.
Args:
mode: which matrix stack is the target for subsequent matrix operations.
'''
@accepts(POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def mult_matrixx(m):
pass
@accepts(t.enum, t.fixed, t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def multi_tex_coord4x(texture, s, t, r, q):
pass
@accepts(t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def normal3x(nx, ny, nz):
pass
@accepts(t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def normal_pointer(type, stride, pointer):
'''
define an array of normals.
gl.normal_pointer specifies the location and data format of an array of
normals to use when rendering. type specifies the data type of each normal
coordinate, and stride specifies the byte stride from one normal to the
next, allowing vertices and attributes to be packed into a single array or
stored in separate arrays.
Args:
type: the data type of each coordinate in the array.
stride: the byte offset between consecutive normals.
pointer: a pointer to the first coordinate of the first normal in the
array.
'''
@accepts(t.fixed, t.fixed, t.fixed, t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def orthox(l, r, b, t, n, f):
pass
@accepts(t.enum, t.int)
@returns(t.void)
@binds(dll)
def pixel_storei(pname, param):
pass
@accepts(t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def point_parameterx(pname, param):
pass
@accepts(t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def point_parameterxv(pname, params):
pass
@accepts(t.fixed)
@returns(t.void)
@binds(dll)
def point_sizex(size):
pass
@accepts(t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def polygon_offsetx(factor, units):
pass
@accepts()
@returns(t.void)
@binds(dll)
def pop_matrix():
pass
@accepts()
@returns(t.void)
@binds(dll)
def push_matrix():
'''
push and pop the current matrix stack.
'''
@accepts(t.int, t.int, t.sizei, t.sizei, t.enum, t.enum, t.void)
@returns(t.void)
@binds(dll)
def read_pixels(x, y, width, height, format, type, pixels):
'''
read a block of pixels from the frame buffer.
    gl.read_pixels returns pixel data from the frame buffer, starting with the
    pixel whose lower left corner is at location (x, y), into client memory
    starting at the location pointed to by pixels. Several parameters control
    the processing of the pixel data before it is placed into client memory;
    these parameters are set with gl.pixel_storei.
Args:
x: the window coordinates of the first pixel that is read from the frame
buffer.
y: the window coordinates of the first pixel that is read from the frame
buffer.
width: the dimensions of the pixel rectangle.
height: the dimensions of the pixel rectangle.
format: the format of the pixel data.
type: the data type of the pixel data.
pixels: returns the pixel data.
'''
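# Hedged usage sketch (defined but never called by this module). The caller
# supplies the GL enum values for format/type and the number of bytes per
# pixel they imply, so no symbolic constants are assumed here; a current
# GL ES 1.x context is required.
def _example_read_pixels(x, y, width, height, fmt, pixel_type, bytes_per_pixel=4):
    buf = (t.ubyte * (width * height * bytes_per_pixel))()
    read_pixels(x, y, width, height, fmt, pixel_type, buf)
    return bytearray(buf)  # copy the pixel bytes out of the ctypes buffer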
@accepts(t.fixed, t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def rotatex(angle, x, y, z):
pass
@accepts(t.float, t.boolean)
@returns(t.void)
@binds(dll)
def sample_coverage(value, invert):
'''
specify multisample coverage parameters.
Args:
value: a single floating-point sample coverage value.
invert: a single boolean value representing if the coverage masks should
be inverted.
'''
@accepts(t.clampx, t.boolean)
@returns(t.void)
@binds(dll)
def sample_coveragex(value, invert):
pass
@accepts(t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def scalex(x, y, z):
pass
@accepts(t.int, t.int, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def scissor(x, y, width, height):
'''
define the scissor box.
gl.scissor defines a rectangle, called the scissor box, in window
coordinates. The first two arguments, x and y, specify the lower left corner
of the box. width and height specify the width and height of the box.
Args:
x: the lower left corner of the scissor box.
y: the lower left corner of the scissor box.
width: the width and height of the scissor box.
height: the width and height of the scissor box.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def shade_model(mode):
'''
select flat or smooth shading.
Args:
mode: a symbolic value representing a shading technique.
'''
@accepts(t.enum, t.int, t.uint)
@returns(t.void)
@binds(dll)
def stencil_func(func, ref, mask):
'''
set front and back function and reference value for stencil testing.
Args:
func: the test function.
ref: the reference value for the stencil test.
mask: a mask that is anded with both the reference value and the stored
stencil value when the test is done.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def stencil_mask(mask):
'''
control the front and back writing of individual bits in the stencil planes.
    gl.stencil_mask controls the writing of individual bits in the stencil
    planes. The least significant n bits of mask, where n is the number of
    bits in the stencil buffer, specify a mask: where a 1 appears, the
    corresponding stencil bit is writable; where a 0 appears, it is
    write-protected.
Args:
mask: a bit mask to enable and disable writing of individual bits in the
stencil planes.
'''
@accepts(t.enum, t.enum, t.enum)
@returns(t.void)
@binds(dll)
def stencil_op(fail, zfail, zpass):
'''
set front and back stencil test actions.
    gl.stencil_op takes three arguments that indicate what happens to the
    stored stencil value while stenciling is enabled. If the stencil test
    fails, no change is made to the pixel's color or depth buffers, and fail
    specifies what happens to the stencil buffer contents; eight symbolic
    actions are accepted for each of the three arguments.
Args:
fail: the action to take when the stencil test fails.
zfail: the stencil action when the stencil test passes, but the depth
test fails.
zpass: the stencil action when both the stencil test and the depth test
pass, or when the stencil test passes and either there is no depth
buffer or depth testing is not enabled.
'''
@accepts(t.int, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def tex_coord_pointer(size, type, stride, pointer):
'''
define an array of texture coordinates.
gl.tex_coord_pointer specifies the location and data format of an array of
texture coordinates to use when rendering. size specifies the number of
coordinates per texture coordinate set, and must be 1, 2, 3, or 4. type
specifies the data type of each texture coordinate, and stride specifies the
byte stride from one texture coordinate set to the next, allowing vertices
and attributes to be packed into a single array or stored in separate
    arrays. (Single-array storage may be more efficient on some
    implementations; see gl.interleaved_arrays.)
Args:
size: the number of coordinates per array element.
type: the data type of each texture coordinate.
stride: the byte offset between consecutive texture coordinate sets.
pointer: a pointer to the first coordinate of the first texture
coordinate set in the array.
'''
@accepts(t.enum, t.enum, t.int)
@returns(t.void)
@binds(dll)
def tex_envi(target, pname, param):
pass
@accepts(t.enum, t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def tex_envx(target, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def tex_enviv(target, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def tex_envxv(target, pname, params):
pass
@accepts(t.enum, t.int, t.int, t.sizei, t.sizei, t.int, t.enum, t.enum, t.void)
@returns(t.void)
@binds(dll)
def tex_image2_d(target, level, internalformat, width, height, border, format, type, pixels):
'''
specify a two-dimensional texture image.
Args:
target: the target texture.
level: the level-of-detail number.
internalformat: the number of color components in the texture.
width: the width of the texture image.
        height: the height of the texture image.
border: this value must be 0.
format: the format of the pixel data.
type: the data type of the pixel data.
pixels: a pointer to the image data in memory.
'''
@accepts(t.enum, t.enum, t.int)
@returns(t.void)
@binds(dll)
def tex_parameteri(target, pname, param):
pass
@accepts(t.enum, t.enum, t.fixed)
@returns(t.void)
@binds(dll)
def tex_parameterx(target, pname, param):
pass
@accepts(t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def tex_parameteriv(target, pname, params):
pass
@accepts(t.enum, t.enum, POINTER(t.fixed))
@returns(t.void)
@binds(dll)
def tex_parameterxv(target, pname, params):
pass
@accepts(t.enum, t.int, t.int, t.int, t.sizei, t.sizei, t.enum, t.enum, t.void)
@returns(t.void)
@binds(dll)
def tex_sub_image2_d(target, level, xoffset, yoffset, width, height, format, type, pixels):
'''
specify a two-dimensional texture subimage.
Args:
target: the target to which the texture is bound for gltexsubimage2d.
level: the level-of-detail number.
xoffset: a texel offset in the x direction within the texture array.
yoffset: a texel offset in the y direction within the texture array.
width: the width of the texture subimage.
height: the height of the texture subimage.
format: the format of the pixel data.
type: the data type of the pixel data.
pixels: a pointer to the image data in memory.
'''
@accepts(t.fixed, t.fixed, t.fixed)
@returns(t.void)
@binds(dll)
def translatex(x, y, z):
pass
@accepts(t.int, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def vertex_pointer(size, type, stride, pointer):
'''
define an array of vertex data.
gl.vertex_pointer specifies the location and data format of an array of
vertex coordinates to use when rendering. size specifies the number of
coordinates per vertex, and must be 2, 3, or 4. type specifies the data type
of each coordinate, and stride specifies the byte stride from one vertex to
the next, allowing vertices and attributes to be packed into a single array
    or stored in separate arrays. (Single-array storage may be more efficient
    on some implementations; see gl.interleaved_arrays.)
Args:
size: the number of coordinates per vertex.
type: the data type of each coordinate in the array.
stride: the byte offset between consecutive vertices.
pointer: a pointer to the first coordinate of the first vertex in the
array.
'''
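# Hedged usage sketch (defined but never called by this module). The caller
# passes 16.16 fixed-point coordinate values plus the matching GL type enum
# (its exact name in this module is not assumed here). The returned ctypes
# array must stay alive for as long as GL may read from it; a current
# GL ES 1.x context is required.
def _example_vertex_pointer(xy_fixed_values, fixed_type_enum):
    data = (t.fixed * len(xy_fixed_values))(*xy_fixed_values)
    vertex_pointer(2, fixed_type_enum, 0, data)  # 2D vertices, tightly packed
    return data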
@accepts(t.int, t.int, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def viewport(x, y, width, height):
'''
set the viewport.
    gl.viewport specifies the affine transformation of x and y from normalized
    device coordinates to window coordinates.
Args:
x: the lower left corner of the viewport rectangle, in pixels.
y: the lower left corner of the viewport rectangle, in pixels.
width: the width and height of the viewport.
height: the width and height of the viewport.
'''
|
SilentPenguin/OpenGL.py
|
opengl/gl/raw/gl_es_cm_1_0.py
|
Python
|
apache-2.0
| 51,087
|
"""This is complete MIME handling package, provides fast parser
and models for handling mime.
Rationale
--------
* Standard python parser is slow at parsing big messages, it takes ~1 second
and a couple of millions of ops to parse 11MB message.
* Not very memory efficient, as it splits the message into array of lines,
and joins them after the parsing.
* Does not preserve the original encodings when altering the message.
The new parser is:
* Fast, it takes ~50 millisecond and ~2K operations to parse 11 MB message.
* Memory efficient, as it stores the message in one string.
* Tracks changes and returns unchanged parts unchanged upon serialization.
* Converts headers to unicode, detects and preserves encodings when possible.
Parser drawbacks:
* The parser is strict: when the MIME is broken, it raises MimeError and does
  not attempt to fix anything except simple errors (like mistyped charsets).
Alternatives:
If you still need to process broken MIME, use
flanker.mime.fallback.FallbackMessage, which relies on the Python parser to
fix the broken MIME and forces broken encodings in bodies and headers, but
beware that it can lose some information because of broken
or unknown encodings.
Examples
--------
>> from flanker import mime
>> msg = mime.from_string(message_string)
# unicode multi-value dictionary with headers
msg.headers
# useful content_type member with predicates:
msg.content_type.is_multipart()
msg.content_type.is_singlepart()
msg.content_type.is_message_container()
#decoded body of the message
if msg.content_type.is_singlepart():
msg.body
# parts if message is multipart
if msg.content_type.is_multipart():
msg.parts
# enclosed message
if msg.content_type.is_message_container():
msg.enclosed
read more in package details.
"""
from flanker.mime.message.errors import DecodingError, EncodingError, MimeError
from flanker.mime import create
from flanker.mime.create import from_string
from flanker.mime.message.fallback.create import from_string as recover
from flanker.mime.message.headers.parametrized import fix_content_type
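# A hedged walk-through of the API documented above. The helper below is only
# an illustrative sketch (it is not used anywhere else in the package), and
# `raw_mime` stands in for the bytes of a real RFC 2822 message.
def _example_summary(raw_mime):
    msg = from_string(raw_mime)
    subject = msg.headers.get('Subject')
    if msg.content_type.is_singlepart():
        return subject, msg.body
    if msg.content_type.is_multipart():
        return subject, [part.content_type for part in msg.parts]
    return subject, msg.enclosed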
|
mailgun/flanker
|
flanker/mime/__init__.py
|
Python
|
apache-2.0
| 2,085
|
'''
Created on 06.01.2015
@author: Iris
'''
from Container import container
from Component import Component
class Tag(Component):
'''
Tag class for tagging components
'''
def __init__(self, tag):
'''
Constructor
'''
Component.__init__(self)
self.uid = 'tag_' + tag + self.uid
self._tag = tag
def activate(self):
Component.activate(self)
container.registerTag(self._tag, self.parent)
if not hasattr(self.parent, 'tags'):
self.parent.tags = []
self.parent.tags.append(self._tag)
self.parent.hasTag = self._hasTag
def deactivate(self):
self.parent.tags.remove(self._tag)
if len(self.parent.tags) == 0:
del self.parent.tags
container.deregisterTag(self._tag, self.parent)
Component.deactivate(self)
def _hasTag(self, tag):
return tag in self.parent.tags
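# Hedged usage sketch (illustrative only; how `parent` is wired up depends on
# this component system, so the lines below are left as comments rather than
# executable code):
#
#     tag = Tag('enemy')
#     tag.parent = some_entity      # normally assigned by the owning entity
#     tag.activate()                # registers the tag and adds hasTag()
#     some_entity.hasTag('enemy')   # -> True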
|
irisSchaffer/component-based-asteroids
|
Component/Tag.py
|
Python
|
apache-2.0
| 978
|
"""
CAVEAT EMPTOR: Tests must be launched top-down!
Notes:
If you inspect Gerrit manually after running these tests, the changes,
reviews and comments they create will still be present.
"""
from __future__ import print_function
import pytest
import fixtures
from gerrit import Client, AuthenticationError
from gerrit.model import ChangeDetails, Patch
def setup_module():
fixtures.setup()
def gerrit():
return Client(fixtures.GERRIT_URL)
def authenticated_gerrit():
client = gerrit()
client.authenticate(method='become', username=fixtures.USERNAME)
return client
def first_line(text):
return text.strip().split('\n')[0].strip()
def first_para(text):
lines = [line.strip() for line in text.strip().split('\n')]
result = []
for line in lines:
if not line:
return ' '.join(result)
result.append(line)
return ' '.join(result)
def test_projects():
client = gerrit()
projects = client.projects()
assert any(project.name == fixtures.TEST_PROJECT for project in projects)
def test_changes():
client = gerrit()
changes = client.changes(search={'project': fixtures.TEST_PROJECT})
assert set(change.name for change in changes) == \
set((first_para(fixtures.COMMIT_i),
first_para(fixtures.COMMIT_b1_2),
first_para(fixtures.COMMIT_b2),))
assert changes[0].project_name == fixtures.TEST_PROJECT
def test_change_details():
client = gerrit()
# We need to get the id at a cost of a slightly more brittle test
changes = client.changes(search={'project': fixtures.TEST_PROJECT})
change = [ch for ch in changes
if ch.name == first_para(fixtures.COMMIT_b2)][0]
change = client.change_details(change)
print(change)
assert change.name == first_para(fixtures.COMMIT_b2)
assert change.message == fixtures.COMMIT_b2
assert change.status == ChangeDetails.IN_PROGRESS
assert len(change.messages) == 0
def test_patchset_details():
client = gerrit()
# We need to get the id at a cost of a slightly more brittle test
changes = client.changes(search={'project': fixtures.TEST_PROJECT})
change = [ch for ch in changes
if ch.name == first_para(fixtures.COMMIT_b1_2)][0]
change = client.change_details(change)
patchset = change.last_patchset_details
assert len(patchset.patches) == 2
assert patchset.patches[-1].change_type == Patch.MODIFIED
assert patchset.patches[-1].insertions == 5
assert patchset.patches[-1].deletions == 1
def test_review():
client = authenticated_gerrit()
REVIEW_COMMENT = "YES, THIS IS COMMENT"
# We need to get the id at a cost of a slightly more brittle test
changes = client.changes(search={'project': fixtures.TEST_PROJECT})
change = [ch for ch in changes
if ch.name == first_para(fixtures.COMMIT_b2)][0]
change = client.change_details(change)
patchset = change.last_patchset_details
client.publish_review(patchset, REVIEW_COMMENT)
change = client.change_details(change)
assert len(change.messages) == 1
assert change.messages[0].message.strip().endswith(REVIEW_COMMENT)
def test_commenting():
client = authenticated_gerrit()
# We need to get the id at a cost of a slightly more brittle test
changes = client.changes(search={'project': fixtures.TEST_PROJECT})
change = [ch for ch in changes
if ch.name == first_para(fixtures.COMMIT_b2)][0]
change = client.change_details(change)
patchset = change.last_patchset_details
client.save_comment(patchset.patches[-1], 2, 'Hello!')
return # FIXME: actually do some testing here!
# skipping - if someone has any idea how to create a simple LDAP server,
# leave me a note
def _test_invalid_authentication():
client = gerrit()
with pytest.raises(AuthenticationError):
client.authenticate(method='user_pass',
username=fixtures.USERNAME,
password=fixtures.PASSWORD + '___')
# skipping - see note above
def _test_authentication():
client = gerrit()
client.authenticate(method='user_pass',
username=fixtures.USERNAME,
password=fixtures.PASSWORD)
def test_account_without_signing_in():
client = gerrit()
assert client.account() is None
def test_dev_authentication():
client = gerrit()
client.account()
client.authenticate(method='become',
username=fixtures.USERNAME)
assert client.account().user_name == fixtures.USERNAME
|
bcwaldon/python-gerrit
|
tests/test_gerrit.py
|
Python
|
apache-2.0
| 4,616
|
#!/usr/bin/env python
#
# This script loads a trained LSTM net and starts
# interactive session with a chat bot interface
# that can verbally respond or fire API calls
#
import json
import click
import requests
import numpy as np
from modules.entities import EntityTracker
from modules.bow import BowEncoder
from modules.lstm_net import LSTM_net
from modules.embed import UtteranceEmbed
class InteractiveSession(object):
def __init__(self, entity_file, ckpt_dir, text_model, api_url):
# Persist API url
self.api_url = api_url
# Load saved data from training and restore static objects
with open("%s/bow_vocab.json" % ckpt_dir) as fd:
vocab = json.load(fd)
with open("%s/action_templates.json" % ckpt_dir) as fd:
self.action_templates = json.load(fd)
with open("%s/action_map.json" % ckpt_dir) as fd:
self.action_map = json.load(fd)
self.action_size = len(self.action_templates)
self.bow_enc = BowEncoder(vocab=vocab)
self.emb = UtteranceEmbed(text_model)
self.entity_file = entity_file
et = EntityTracker(entity_file)
obs_size = self.emb.dim + self.bow_enc.vocab_size + et.num_features
nb_hidden = 128
self.net = LSTM_net(obs_size=obs_size,
action_size=self.action_size,
nb_hidden=nb_hidden,
ckpt_dir=ckpt_dir)
# restore checkpoint
self.net.restore()
def api_call(self, body):
"""
        Fires a POST API call to the supplied URL with the given body
:return: 'True' if a successful POST else 'False'
"""
response = requests.post(self.api_url, json=body)
if response.status_code == 200:
return True
return False
def interact(self):
"""
Live interaction using the trained LSTM net
:return: None
"""
# initialize entity tracker
et = EntityTracker(self.entity_file)
# reset network
self.net.reset_state()
# begin interaction loop
while True:
# get input from user
u = input('[USER] >> ')
# check if user wants to begin new session
if u == 'clear' or u == 'reset' or u == 'restart':
self.net.reset_state()
et = EntityTracker(self.entity_file)
print('\n -- reset -- \n')
# check for exit command
elif u == 'exit' or u == 'stop' or u == 'quit':
print('\n -- exit -- \n')
break
else:
# Blank response taken as <SILENCE> wild card
if not u:
u = '<SILENCE>'
# encode
et.extract_entities(u)
u_ent_features = et.context_features()
u_emb = self.emb.encode(u)
u_bow = self.bow_enc.encode(u)
# concat features
features = np.concatenate((u_ent_features, u_emb, u_bow), axis=0)
# get action mask
action_mask = et.action_mask(self.action_map, self.action_size)
# forward
prediction = self.net.forward(features, action_mask)
resp_text = self.action_templates[prediction]
for entity_key in et.entities.keys():
if resp_text and entity_key in resp_text:
if et.entities[entity_key]:
resp_text = resp_text.replace(entity_key, et.entities[entity_key])
if 'api_call' in resp_text:
response = self.api_call(et.entities)
if response:
print('[BOT] >> ', resp_text, ' [SUCCESS]')
else:
print('[BOT] >> ', resp_text, ' [FAIL]')
else:
print('[BOT] >> ', resp_text.capitalize())
print('')
@click.command()
@click.option('--ckpt-dir', default='ckpt',
help='Checkpoint directory to store artifacts. Default ./ckpt')
@click.option('--text-model', default="embed/text8",
help='A pretrained text8 model file with component files. '
'Default "embed/text8"')
@click.option('--entity-file', default=None,
help='A JSON file containing a dict of entities. '
'Format: {"<entity1>": ["val1", "val2"], "<entity2>": ["val1"]}')
@click.option('--api-url', default=None,
help='URL to fire the api call')
def main(ckpt_dir, text_model, entity_file, api_url):
# setup session
sess = InteractiveSession(
entity_file=entity_file,
text_model=text_model,
ckpt_dir=ckpt_dir,
api_url=api_url
)
# begin interaction
sess.interact()
if __name__ == '__main__':
main()
|
DrigerG/IIITB-ML
|
project/HCN/interact.py
|
Python
|
apache-2.0
| 4,952
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflowjs import version
# File name for the indexing JSON file in an artifact directory.
ARTIFACT_MODEL_JSON_FILE_NAME = 'model.json'
# JSON string keys for fields of the indexing JSON.
ARTIFACT_MODEL_TOPOLOGY_KEY = 'modelTopology'
ARTIFACT_MODEL_INITIALIZER = 'modelInitializer'
ARTIFACT_WEIGHTS_MANIFEST_KEY = 'weightsManifest'
FORMAT_KEY = 'format'
TFJS_GRAPH_MODEL_FORMAT = 'graph-model'
TFJS_LAYERS_MODEL_FORMAT = 'layers-model'
GENERATED_BY_KEY = 'generatedBy'
CONVERTED_BY_KEY = 'convertedBy'
SIGNATURE_KEY = 'signature'
USER_DEFINED_METADATA_KEY = 'userDefinedMetadata'
# Model formats.
KERAS_SAVED_MODEL = 'keras_saved_model'
KERAS_MODEL = 'keras'
TF_SAVED_MODEL = 'tf_saved_model'
TF_HUB_MODEL = 'tf_hub'
TFJS_GRAPH_MODEL = 'tfjs_graph_model'
TFJS_LAYERS_MODEL = 'tfjs_layers_model'
TF_FROZEN_MODEL = 'tf_frozen_model'
# CLI argument strings.
INPUT_PATH = 'input_path'
OUTPUT_PATH = 'output_path'
INPUT_FORMAT = 'input_format'
OUTPUT_FORMAT = 'output_format'
OUTPUT_NODE = 'output_node_names'
SIGNATURE_NAME = 'signature_name'
SAVED_MODEL_TAGS = 'saved_model_tags'
QUANTIZATION_BYTES = 'quantization_bytes'
QUANTIZATION_TYPE_FLOAT16 = 'quantize_float16'
QUANTIZATION_TYPE_UINT8 = 'quantize_uint8'
QUANTIZATION_TYPE_UINT16 = 'quantize_uint16'
SPLIT_WEIGHTS_BY_LAYER = 'split_weights_by_layer'
VERSION = 'version'
SKIP_OP_CHECK = 'skip_op_check'
STRIP_DEBUG_OPS = 'strip_debug_ops'
WEIGHT_SHARD_SIZE_BYTES = 'weight_shard_size_bytes'
CONTROL_FLOW_V2 = 'control_flow_v2'
EXPERIMENTS = 'experiments'
METADATA = 'metadata'
def get_converted_by():
"""Get the convertedBy string for storage in model artifacts."""
return 'TensorFlow.js Converter v%s' % version.version
|
tensorflow/tfjs
|
tfjs-converter/python/tensorflowjs/converters/common.py
|
Python
|
apache-2.0
| 2,349
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a remarketing user list (a.k.a. audience).
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: UserListService.mutate
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)',
              'Joseph DiLallo')
import uuid
from googleads import adwords
def main(client):
# Initialize appropriate service.
user_list_service = client.GetService(
'AdwordsUserListService', version='v201406')
conversion_tracker_service = client.GetService(
'ConversionTrackerService', version='v201406')
# Construct operations and add a user list.
operations = [
{
'operator': 'ADD',
'operand': {
'xsi_type': 'BasicUserList',
'name': 'Mars cruise customers #%s' % uuid.uuid4(),
'description': 'A list of mars cruise customers in the last year',
'membershipLifeSpan': '365',
'conversionTypes': [
{
'name': ('Mars cruise customers #%s'
% uuid.uuid4())
}
],
# Optional field.
'status': 'OPEN',
}
}
]
result = user_list_service.mutate(operations)
# Display results.
if 'value' in result:
conversion_ids = []
for user_list in result['value']:
if user_list['conversionTypes']:
for conversion_type in user_list['conversionTypes']:
conversion_ids.append(conversion_type['id'])
selector = {
'fields': ['Name', 'Status', 'Category'],
'predicates': [{
'field': 'Id',
'operator': 'IN',
'values': conversion_ids
}],
'ordering': [{
'field': 'Name',
'sortOrder': 'ASCENDING'
}]
}
pages = conversion_tracker_service.get(selector)
conversions_map = {}
if pages['entries']:
for conversion_tracker in pages['entries']:
conversions_map[conversion_tracker['id']] = conversion_tracker
for user_list in result['value']:
print ('User list with name \'%s\' and ID \'%s\' was added.'
% (user_list['name'], user_list['id']))
if user_list['conversionTypes']:
for conversion_type in user_list['conversionTypes']:
conversion_tracker = conversions_map[conversion_type['id']]
print ('Conversion type code snippet associated to the list:\n%s\n'
% conversion_tracker['snippet'])
else:
print 'No user lists were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
|
dietrichc/streamline-ppc-reports
|
examples/adwords/v201406/remarketing/add_audience.py
|
Python
|
apache-2.0
| 3,471
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from .extensions import load_type_extensions
from .extensions import load_typesupport_extensions
def generate(
*,
package_name,
interface_files,
include_paths=None,
output_path=None,
types=None,
typesupports=None
):
"""
Generate source code from interface definition files.
To do so, this function leverages type representation and type
support generation support as provided by third-party package
extensions.
Each path to an interface definition file is a relative path optionally
prefixed by another path followed by a colon ':', against which the first
relative path is to be resolved.
The directory structure that these relative paths exhibit will be replicated
on output (as opposed to the prefix path, which will be ignored).
    If neither type representations nor type supports are specified, all
    available ones will be generated.
If more than one type representation or type support is generated, the
name of each will be appended to the given `output_path` to preclude
name clashes upon writing source code files.
:param package_name: name of the package to generate source code for
:param interface_files: list of paths to interface definition files
:param include_paths: optional list of paths to include dependency
interface definition files from
:param output_path: optional path to directory to hold generated
source code files, defaults to the current working directory
:param types: optional list of type representations to generate
:param typesupports: optional list of type supports to generate
:returns: list of lists of paths to generated source code files,
one group per type or type support extension invoked
"""
extensions = []
unspecific_generation = not types and not typesupports
if types or unspecific_generation:
extensions.extend(load_type_extensions(
specs=types,
strict=not unspecific_generation))
if typesupports or unspecific_generation:
extensions.extend(load_typesupport_extensions(
specs=typesupports,
strict=not unspecific_generation))
if unspecific_generation and not extensions:
raise RuntimeError('No type nor typesupport extensions were found')
if include_paths is None:
include_paths = []
if output_path is None:
output_path = pathlib.Path.cwd()
else:
os.makedirs(output_path, exist_ok=True)
if len(extensions) > 1:
return [
extension.generate(
package_name, interface_files, include_paths,
output_path=output_path / extension.name)
for extension in extensions
]
return [extensions[0].generate(
package_name, interface_files,
include_paths, output_path
)]
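# Hedged usage sketch (never invoked by this module). The package name, paths
# and extension names below are placeholders; see the docstring of `generate`
# above for the meaning of each argument.
def _example_generate():
    return generate(
        package_name='example_msgs',
        interface_files=['msg:msg/Example.msg'],  # optional 'prefix:' form
        include_paths=['/opt/ros/example/share'],
        output_path=pathlib.Path('generated'),
        types=['example_type_extension'],         # hypothetical extension name
    )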
|
ros2/rosidl
|
rosidl_cli/rosidl_cli/command/generate/api.py
|
Python
|
apache-2.0
| 3,504
|
"""This module contains multiple constants collections typically used when
communicating metrics and findings with Teamscale."""
from __future__ import absolute_import
from __future__ import unicode_literals
class Assessment:
"""Constants to be used as assessment levels."""
RED = "RED"
YELLOW = "YELLOW"
class AssessmentMetricColors:
"""Constants used for colors in assessment metrics. """
RED = "RED"
YELLOW = "YELLOW"
GREEN = "GREEN"
class Enablement:
"""The enablement describes which rating a finding should receive."""
RED = "RED"
"""The finding should always be rated red."""
YELLOW = "YELLOW"
"""The finding should always be rated yellow."""
AUTO = "AUTO"
"""The assessment provided by the concrete finding is used."""
OFF = "OFF"
"""The finding is disabled by default."""
class MetricAggregation:
"""Class that contains valid aggregation strategies."""
SUM = "SUM"
MAX = "MAX"
MIN = "MIN"
class MetricValueType:
"""Metric value types."""
NUMERIC = "NUMERIC"
TIMESTAMP = "TIMESTAMP"
ASSESSMENT = "ASSESSMENT"
class MetricProperties:
"""Possible properties used in metric definitions."""
SIZE_METRIC = "SIZE_METRIC"
"""Normal number counting metric."""
RATIO_METRIC = "RATIO_METRIC"
"""Metric is a percentage value between 0 and 1."""
QUALITY_NEUTRAL = "QUALITY_NEUTRAL"
"""Quality neutral metrics can not be assessed/rated (e.g. number of files)"""
LOW_IS_BAD = "LOW_IS_BAD"
"""Normally high values are considered bad, use this to inverse."""
class CoverageFormats:
"""Possible coverage formats that Teamscale can interpret."""
CTC = "CTC"
COBERTURA = "COBERTURA"
GCOV = "GCOV"
LCOV = "LCOV"
XR_BABOON = "XR_BABOON"
JACOCO = "JACOCO"
DOT_COVER = "DOT_COVER"
MS_COVERAGE = "MS_COVERAGE"
ROSLYN = "ROSLYN"
BULLSEYE = "BULLSEYE"
SIMPLE = "SIMPLE"
OPEN_COVER = "OPEN_COVER"
IEC_COVERAGE = "IEC_COVERAGE"
LLVM = "LLVM"
CLOVER = "CLOVER"
XCODE = "XCODE"
TESTWISE_COVERAGE = "TESTWISE_COVERAGE"
SAP_COVERAGE = "SAP_COVERAGE"
ISTANBUL = "ISTANBUL"
class ReportFormats:
"""Report formats that Teamscale understands."""
PCLINT = "PCLINT"
CLANG = "CLANG"
ASTREE = "ASTREE"
FXCOP = "FXCOP"
SPCOP = "SPCOP"
CS_COMPILER_WARNING = "CS_COMPILER_WARNING"
PYLINT = "PYLINT"
FINDBUGS = "FINDBUGS"
class UnitTestReportFormats:
"""Reports for unit test results that Teamscale understands."""
JUNIT = "JUNIT"
XUNIT = "XUNIT"
class ConnectorType:
"""Connector types."""
TFS = "Azure DevOps TFVC (TFS)"
FILE_SYSTEM = "File System"
MULTI_VERSION_FILE_SYSTEM = "Multi-Version File System"
GIT = "Git"
SVN = "Subversion"
GERRIT = "Gerrit"
class TaskStatus:
"""Different statuses a task in Teamscale can have"""
OPEN = "OPEN"
RESOLVED = "RESOLVED"
VERIFIED = "VERIFIED"
DISCARDED = "DISCARDED"
class TaskResolution:
"""Different resolutions used in tasks"""
NONE = "NONE"
FIXED = "FIXED"
INFEASIBLE = "INFEASIBLE"
TOO_MUCH_EFFORT = "TOO_MUCH_EFFORT"
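def _example_metric_description():
    """Hedged sketch of how the collections above are typically combined when
    describing a custom metric. The dict layout is illustrative only; it is
    not the exact payload of any particular Teamscale client call."""
    return {
        "name": "Example Size Metric",
        "aggregation": MetricAggregation.SUM,
        "value_type": MetricValueType.NUMERIC,
        "properties": [MetricProperties.SIZE_METRIC],
    }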
|
cqse/teamscale-client-python
|
teamscale_client/constants.py
|
Python
|
apache-2.0
| 3,287
|
#!/usr/bin/env python
"""
Robocom teleoperator node
"""
import sys, os, time
import roslib
import rospy
from sensor_msgs.msg import Joy
from r2p.msg import Velocity
topic = rospy.get_param('topic', 'robot')
setpoint_scale = rospy.get_param('setpoint_scale', {'x': 1, 'y': 1, 'w': 1})
gear_ratio = rospy.get_param('gear_ratio', (
{'x': 0.25, 'y': 0.25, 'w': 0.25},
{'x': 0.5, 'y': 0.5, 'w': 0.5},
{'x': 0.75, 'y': 0.75, 'w': 0.75},
{'x': 1, 'y': 1, 'w': 1}
))
setpoint = {'x': 0.0,'y': 0.0,'w': 0.0}
gear = 0
restart = False
def joy_cb(msg):
global gear
global setpoint
global restart
if msg.buttons[6]:
restart = True
if msg.buttons[3]:
gear = 3
if msg.buttons[2]:
gear = 2
if msg.buttons[1]:
gear = 1
if msg.buttons[0]:
gear = 0
setpoint['x'] = msg.axes[1] * setpoint_scale['x'] * gear_ratio[gear]['x']
setpoint['y'] = msg.axes[0] * setpoint_scale['y'] * gear_ratio[gear]['y']
setpoint['w'] = msg.axes[3] * setpoint_scale['w'] * gear_ratio[gear]['w']
def main():
global restart
# Initialize ROS stuff
rospy.init_node("teleop_joy")
r = rospy.Rate(20) # 20hz
pubVelocity = rospy.Publisher(topic, Velocity)
pubVelocity.publish(Velocity(0.0, 0.0, 0.0))
subJoy = rospy.Subscriber("/joy", Joy, joy_cb)
while not rospy.is_shutdown():
if restart == True:
pubVelocity.unregister()
rospy.sleep(1)
pubVelocity = rospy.Publisher(topic, Velocity)
pubVelocity.publish(Velocity(0.0, 0.0, 0.0))
restart = False
print setpoint
pubVelocity.publish(Velocity(setpoint['x'], setpoint['y'], setpoint['w']))
r.sleep()
# Stop the robot
pubVelocity.publish(Velocity(0.0, 0.0, 0.0))
# Call the 'main' function when this script is executed
if __name__ == "__main__":
try: main()
except rospy.ROSInterruptException: pass
|
openrobots-dev/ROS
|
src/teleop_joy.py
|
Python
|
apache-2.0
| 1,794
|
##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
import os
import sys
import odframework
import dsattributes
from getopt import getopt, GetoptError
# TODO: Nested groups
# TODO: GroupMembership
masterNodeName = "/LDAPv3/127.0.0.1"
localNodeName = "/Local/Default"
saclGroupNodeName = "/Local/Default"
saclGroupNames = ("com.apple.access_calendar", "com.apple.access_addressbook")
masterUsers = [
(
"odtestamanda",
{
dsattributes.kDS1AttrFirstName : ["Amanda"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Amanda Test"],
dsattributes.kDSNAttrEMailAddress : ["amanda@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A70-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33300"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestbetty",
{
dsattributes.kDS1AttrFirstName : ["Betty"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Betty Test"],
dsattributes.kDSNAttrEMailAddress : ["betty@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A71-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33301"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestcarlene",
{
dsattributes.kDS1AttrFirstName : ["Carlene"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Carlene Test"],
dsattributes.kDSNAttrEMailAddress : ["carlene@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A72-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33302"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestdenise",
{
dsattributes.kDS1AttrFirstName : ["Denise"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Denise Test"],
dsattributes.kDSNAttrEMailAddress : ["denise@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A73-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33303"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestunicode",
{
dsattributes.kDS1AttrFirstName : ["Unicode " + unichr(208)],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Unicode Test " + unichr(208)],
dsattributes.kDSNAttrEMailAddress : ["unicodetest@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["CA795296-D77A-4E09-A72F-869920A3D284"],
dsattributes.kDS1AttrUniqueID : ["33304"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestat@sign",
{
dsattributes.kDS1AttrFirstName : ["AtSign"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["At Sign Test"],
dsattributes.kDSNAttrEMailAddress : ["attsign@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["71646A3A-1CEF-4744-AB1D-0AC855E25DC8"],
dsattributes.kDS1AttrUniqueID : ["33305"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestsatou",
{
dsattributes.kDS1AttrFirstName : ["\xe4\xbd\x90\xe8\x97\xa4\xe4\xbd\x90\xe8\x97\xa4\xe4\xbd\x90\xe8\x97\xa4".decode("utf-8")],
dsattributes.kDS1AttrLastName : ["Test \xe4\xbd\x90\xe8\x97\xa4".decode("utf-8")],
dsattributes.kDS1AttrDistinguishedName : ["\xe4\xbd\x90\xe8\x97\xa4\xe4\xbd\x90\xe8\x97\xa4\xe4\xbd\x90\xe8\x97\xa4 Test \xe4\xbd\x90\xe8\x97\xa4".decode("utf-8")],
dsattributes.kDSNAttrEMailAddress : ["satou@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["C662F833-75AD-4589-9879-5FF102943CEF"],
dsattributes.kDS1AttrUniqueID : ["33306"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"anotherodtestamanda",
{
dsattributes.kDS1AttrFirstName : ["Amanda"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Amanda Test"],
dsattributes.kDSNAttrEMailAddress : ["anotheramanda@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["E7666814-6D92-49EC-8562-8C4C3D64A4B0"],
dsattributes.kDS1AttrUniqueID : ["33307"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
]
masterGroups = [
(
"odtestsubgroupb",
{
dsattributes.kDS1AttrGeneratedUID : ["6C6CD282-E6E3-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrDistinguishedName : ["OD Test Subgroup B"],
dsattributes.kDSNAttrGroupMembers : ["9DC04A72-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrPrimaryGroupID : ["33401"],
},
),
(
"odtestgrouptop",
{
dsattributes.kDS1AttrGeneratedUID : ["6C6CD280-E6E3-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrDistinguishedName : ["OD Test Group Top"],
dsattributes.kDSNAttrGroupMembers : ["9DC04A70-E6DD-11DF-9492-0800200C9A66", "9DC04A71-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDSNAttrNestedGroups : ["6C6CD282-E6E3-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrPrimaryGroupID : ["33400"],
},
),
(
"odtestgroupbetty",
{
dsattributes.kDS1AttrGeneratedUID : ["2A1F3ED9-D1B3-40F2-8FC4-05E197C1F90C"],
dsattributes.kDS1AttrDistinguishedName : ["OD Test Group Betty"],
dsattributes.kDSNAttrGroupMembers : [],
dsattributes.kDSNAttrNestedGroups : [],
dsattributes.kDS1AttrPrimaryGroupID : ["33403"],
},
),
]
localUsers = [
(
"odtestalbert",
{
dsattributes.kDS1AttrFirstName : ["Albert"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Albert Test"],
dsattributes.kDSNAttrEMailAddress : ["albert@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A74-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33350"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestbill",
{
dsattributes.kDS1AttrFirstName : ["Bill"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Bill Test"],
dsattributes.kDSNAttrEMailAddress : ["bill@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A75-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33351"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestcarl",
{
dsattributes.kDS1AttrFirstName : ["Carl"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Carl Test"],
dsattributes.kDSNAttrEMailAddress : ["carl@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A76-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33352"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"odtestdavid",
{
dsattributes.kDS1AttrFirstName : ["David"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["David Test"],
dsattributes.kDSNAttrEMailAddress : ["david@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["9DC04A77-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrUniqueID : ["33353"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
(
"anotherodtestalbert",
{
dsattributes.kDS1AttrFirstName : ["Albert"],
dsattributes.kDS1AttrLastName : ["Test"],
dsattributes.kDS1AttrDistinguishedName : ["Albert Test"],
dsattributes.kDSNAttrEMailAddress : ["anotheralbert@example.com"],
dsattributes.kDS1AttrGeneratedUID : ["8F059F1B-1CD0-42B5-BEA2-6A36C9B5620F"],
dsattributes.kDS1AttrUniqueID : ["33354"],
dsattributes.kDS1AttrPrimaryGroupID : ["20"],
},
),
]
localGroups = [
(
"odtestsubgroupa",
{
dsattributes.kDS1AttrGeneratedUID : ["6C6CD281-E6E3-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrDistinguishedName : ["OD Test Subgroup A"],
dsattributes.kDSNAttrGroupMembers : ["9DC04A74-E6DD-11DF-9492-0800200C9A66", "9DC04A75-E6DD-11DF-9492-0800200C9A66"],
dsattributes.kDS1AttrPrimaryGroupID : ["33402"],
},
),
(
"odtestgroupalbert",
{
dsattributes.kDS1AttrGeneratedUID : ["3F4D01B8-FDFD-4805-A853-DE9879A2D951"],
dsattributes.kDS1AttrDistinguishedName : ["OD Test Group Albert"],
dsattributes.kDSNAttrGroupMembers : [],
dsattributes.kDS1AttrPrimaryGroupID : ["33404"],
},
),
]
def usage(e=None):
name = os.path.basename(sys.argv[0])
print("usage: %s [options] local_user local_password odmaster_user odmaster_password" % (name,))
print("")
print(" Configures local and OD master directories for testing")
print("")
print("options:")
print(" -h --help: print this help and exit")
if e:
sys.exit(1)
else:
sys.exit(0)
def lookupRecordName(node, recordType, name):
query, error = odframework.ODQuery.queryWithNode_forRecordTypes_attribute_matchType_queryValues_returnAttributes_maximumResults_error_(
node,
recordType,
dsattributes.kDSNAttrRecordName,
dsattributes.eDSExact,
name,
None,
0,
None)
if error:
raise ODError(error)
records, error = query.resultsAllowingPartial_error_(False, None)
if error:
raise ODError(error)
if len(records) < 1:
return None
if len(records) > 1:
raise ODError("Multiple records for '%s' were found" % (name,))
return records[0]
def createRecord(node, recordType, recordName, attrs):
record, error = node.createRecordWithRecordType_name_attributes_error_(
recordType,
recordName,
attrs,
None)
if error:
print(error)
raise ODError(error)
return record
def main():
try:
(optargs, args) = getopt(sys.argv[1:], "h", ["help"])
except GetoptError, e:
usage(e)
for opt, _ignore_arg in optargs:
if opt in ("-h", "--help"):
usage()
if len(args) != 4:
usage()
localUser, localPassword, masterUser, masterPassword = args
userInfo = {
masterNodeName : {
"user" : masterUser,
"password" : masterPassword,
"users" : masterUsers,
"groups" : masterGroups,
},
localNodeName : {
"user" : localUser,
"password" : localPassword,
"users" : localUsers,
"groups" : localGroups,
},
}
session = odframework.ODSession.defaultSession()
userRecords = []
for nodeName, info in userInfo.iteritems():
userName = info["user"]
password = info["password"]
users = info["users"]
groups = info["groups"]
node, error = odframework.ODNode.nodeWithSession_name_error_(session, nodeName, None)
if error:
print(error)
raise ODError(error)
result, error = node.setCredentialsWithRecordType_recordName_password_error_(
dsattributes.kDSStdRecordTypeUsers,
userName,
password,
None
)
if error:
print("Unable to authenticate with directory %s: %s" % (nodeName, error))
raise ODError(error)
print("Successfully authenticated with directory %s" % (nodeName,))
print("Creating users within %s:" % (nodeName,))
for recordName, attrs in users:
record = lookupRecordName(node, dsattributes.kDSStdRecordTypeUsers, recordName)
if record is None:
print("Creating user %s" % (recordName,))
try:
record = createRecord(node, dsattributes.kDSStdRecordTypeUsers, recordName, attrs)
print("Successfully created user %s" % (recordName,))
result, error = record.changePassword_toPassword_error_(
None, "password", None)
if error or not result:
print("Failed to set password for %s: %s" % (recordName, error))
else:
print("Successfully set password for %s" % (recordName,))
except ODError, e:
print("Failed to create user %s: %s" % (recordName, e))
else:
print("User %s already exists" % (recordName,))
if record is not None:
userRecords.append(record)
print("Creating groups within %s:" % (nodeName,))
for recordName, attrs in groups:
record = lookupRecordName(node, dsattributes.kDSStdRecordTypeGroups, recordName)
if record is None:
print("Creating group %s" % (recordName,))
try:
record = createRecord(node, dsattributes.kDSStdRecordTypeGroups, recordName, attrs)
print("Successfully created group %s" % (recordName,))
except ODError, e:
print("Failed to create group %s: %s" % (recordName, e))
else:
print("Group %s already exists" % (recordName,))
        print()
# Populate SACL groups
node, error = odframework.ODNode.nodeWithSession_name_error_(session, saclGroupNodeName, None)
result, error = node.setCredentialsWithRecordType_recordName_password_error_(
dsattributes.kDSStdRecordTypeUsers,
userInfo[saclGroupNodeName]["user"],
userInfo[saclGroupNodeName]["password"],
None
)
if not error:
for saclGroupName in saclGroupNames:
saclGroupRecord = lookupRecordName(node, dsattributes.kDSStdRecordTypeGroups, saclGroupName)
if saclGroupRecord:
print("Populating %s SACL group:" % (saclGroupName,))
for userRecord in userRecords:
details, error = userRecord.recordDetailsForAttributes_error_(None, None)
recordName = details.get(dsattributes.kDSNAttrRecordName, [None])[0]
result, error = saclGroupRecord.isMemberRecord_error_(userRecord, None)
if result:
print("%s is already in the %s SACL group" % (recordName, saclGroupName))
else:
result, error = saclGroupRecord.addMemberRecord_error_(userRecord, None)
print("Adding %s to the %s SACL group" % (recordName, saclGroupName))
print("")
class ODError(Exception):
def __init__(self, error):
self.message = (str(error), error.code())
if __name__ == "__main__":
main()
|
red-hood/calendarserver
|
contrib/od/setup_directory.py
|
Python
|
apache-2.0
| 16,267
|
#!/usr/bin/env python
"""This is the GRR client."""
import pdb
import logging
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.client import comms
from grr.client import installer
from grr.client import stdlib
from grr.lib import client_startup
from grr.lib import config_lib
from grr.lib import flags
flags.DEFINE_bool("install", False, "Specify this to install the client.")
flags.DEFINE_bool("break_on_start", False,
"If True break into a pdb shell immediately on startup. This"
" can be used for debugging the client manually.")
flags.DEFINE_bool("debug_client_actions", False,
"If True break into a pdb shell before executing any client"
" action.")
def main(unused_args):
# Allow per platform configuration.
config_lib.CONFIG.AddContext(
"Client Context", "Context applied when we run the client process.")
client_startup.ClientInit()
if flags.FLAGS.install:
installer.RunInstaller()
errors = config_lib.CONFIG.Validate(["Client", "CA", "Logging"])
if errors and errors.keys() != ["Client.private_key"]:
raise config_lib.ConfigFormatError(errors)
enrollment_necessary = not config_lib.CONFIG.Get("Client.private_key")
# Instantiating the client will create a private_key so we need to use a flag.
client = comms.GRRHTTPClient(
ca_cert=config_lib.CONFIG["CA.certificate"],
private_key=config_lib.CONFIG.Get("Client.private_key", default=None))
if enrollment_necessary:
logging.info("No private key found, starting enrollment.")
client.InitiateEnrolment()
if flags.FLAGS.break_on_start:
pdb.set_trace()
else:
client.Run()
if __name__ == "__main__":
flags.StartMain(main)
|
destijl/grr
|
grr/client/client.py
|
Python
|
apache-2.0
| 1,795
|
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.contrib.admin.views.decorators import staff_member_required
from django.conf import settings
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.utils import simplejson
try:
from offensivecontent import registry
except ImportError:
from registration import registry
from offensivecontent.models import OffensiveContent, OffensiveContentData
from offensivecontent.forms import MarkForm
def _user_has_marked(user, content_type_id, object_id):
try:
OffensiveContentData.objects.get(
user__pk=user.pk,
offensive_content__content_type__pk=content_type_id,
offensive_content__object_id=object_id)
return True
    except (OffensiveContentData.DoesNotExist, OffensiveContentData.MultipleObjectsReturned):
return False
@login_required
def add_ajax(request, content_type_id, object_id):
if not request.is_ajax():
return HttpResponseRedirect("/")
if _user_has_marked(request.user, content_type_id, object_id):
return HttpResponse(content=simplejson.dumps({'result': False}))
ctype = get_object_or_404(ContentType, pk=content_type_id)
site = get_object_or_404(Site, pk=settings.SITE_ID)
obj = get_object_or_404(ctype.model_class(), pk=object_id)
oc, created = OffensiveContent.objects.get_or_create(
content_type=ctype, object_id=object_id, site=site)
ocd = OffensiveContentData(user=request.user, offensive_content=oc)
ocd.save()
return HttpResponse(content=simplejson.dumps({'result': True}))
@login_required
def add(request, content_type_id, object_id,
template_name="offensivecontent/form.html"):
return_url = '/'
if 'ret' in request.GET:
return_url = request.GET['ret']
ctype = get_object_or_404(ContentType, pk=content_type_id)
site = get_object_or_404(Site, pk=settings.SITE_ID)
obj = get_object_or_404(ctype.model_class(), pk=object_id)
if not registry.is_registered(ctype.model_class()):
raise Http404
if request.method == "POST":
if _user_has_marked(request.user, content_type_id, object_id):
return HttpResponseRedirect(return_url)
form = MarkForm(request.POST)
if form.is_valid():
oc, created = OffensiveContent.objects.get_or_create(
content_type=ctype, object_id=object_id, site=site)
data = form.save(commit=False)
data.user = request.user
data.offensive_content = oc
data.save()
return HttpResponseRedirect(return_url)
else:
form = MarkForm()
return render_to_response(template_name,
{'form': form,
'obj': obj,
'ctype': ctype,
'return_url': return_url},
context_instance=RequestContext(request))
@staff_member_required
def mark_safe(request, object_id, is_safe=False,
template_name='offensivecontent/admin/confirm_form.html'):
oc = get_object_or_404(OffensiveContent, pk=object_id)
ctype = oc.content_type
obj = oc.object_id
if request.method == "POST":
if "confirm" in request.POST:
controller = registry.get_controller_for_model(ctype.model_class())
controller.enable_content(oc.content_object)
oc.is_safe = True
oc.save()
if REDIRECT_FIELD_NAME in request.POST:
return HttpResponseRedirect(request.POST[REDIRECT_FIELD_NAME])
else:
return HttpResponseRedirect("/admin/")
if REDIRECT_FIELD_NAME in request.GET:
redirect = '<input type="hidden" name="%s" value="%s">' % (REDIRECT_FIELD_NAME, request.GET[REDIRECT_FIELD_NAME])
else:
redirect = '/admin/'
return render_to_response(template_name,
{'message': "Are you sure you want to mark this content as safe?",
'ctype': ctype,
'obj': oc.content_object,
'redirect': redirect},
context_instance=RequestContext(request))
MESSAGES = {
'disable_user': "Are you sure you want to disable the user for this content?",
'enable_user': "Are you sure you want to enable the user for this content?",
'disable_content': "Are you sure you want to disable this content?",
'enable_content': "Are you sure you want to enable this content?"
}
@staff_member_required
def content_cotroller(request, object_id,
template_name="offensivecontent/admin/confirm_form.html", method=None):
ocontent = OffensiveContent.objects.get(pk=object_id)
if request.method == "POST":
if "confirm" in request.POST:
controller = registry.get_controller_for_model(ocontent.content_type.model_class())
getattr(controller, method)(ocontent.content_object)
            if method == 'disable_content' or method == 'disable_user':
                ocontent.is_safe = False
            elif method == 'enable_content' or method == 'enable_user':
                ocontent.is_safe = True
ocontent.save()
if REDIRECT_FIELD_NAME in request.POST:
return HttpResponseRedirect(request.POST[REDIRECT_FIELD_NAME])
else:
return HttpResponseRedirect("/admin/")
if REDIRECT_FIELD_NAME in request.GET:
redirect = '<input type="hidden" name="%s" value="%s">' % (REDIRECT_FIELD_NAME, request.GET[REDIRECT_FIELD_NAME])
else:
redirect = '/admin/'
return render_to_response(template_name,
{'obj': ocontent.content_object,
'message': MESSAGES[method],
'redirect': redirect,},
context_instance=RequestContext(request))
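A minimal URLconf sketch showing how these views might be wired up, assuming an older Django 1.x-style urls.py; the URL patterns and names below are hypothetical and not taken from the project.
# Hypothetical urls.py hookup for the views above (patterns/names invented).
from django.conf.urls.defaults import patterns, url

from offensivecontent import views

urlpatterns = patterns('',
    url(r'^mark/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        views.add, name='offensivecontent_add'),
    url(r'^mark/ajax/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
        views.add_ajax, name='offensivecontent_add_ajax'),
)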
| callowayproject/django-offensivecontent | offensivecontent/views.py | Python | apache-2.0 | 6,436 |
from share import ProviderAppConfig
from .harvester import FigshareHarvester
from .normalizer import FigshareNormalizer
class AppConfig(ProviderAppConfig):
name = 'providers.com.figshare'
version = '0.0.1'
title = 'figshare'
long_title = 'figshare'
harvester = FigshareHarvester
normalizer = FigshareNormalizer
home_page = 'https://figshare.com/'
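This AppConfig is pure wiring: it points SHARE at the provider's harvester and normalizer classes. A sketch of a sibling provider following the same pattern; the module path and class names are invented.
# Hypothetical sibling provider config (module path and classes are invented).
from share import ProviderAppConfig

from .harvester import ExampleHarvester
from .normalizer import ExampleNormalizer


class AppConfig(ProviderAppConfig):
    name = 'providers.org.example'
    version = '0.0.1'
    title = 'example'
    long_title = 'Example Data Provider'
    harvester = ExampleHarvester
    normalizer = ExampleNormalizer
    home_page = 'https://example.org/'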
| zamattiac/SHARE | providers/com/figshare/apps.py | Python | apache-2.0 | 378 |
'''
Class that handles all communication with the master server
'''
import threading
from getMetrics import getPerfServer
import time
from ccmd.tools.helpers import logApp
# This thread periodically sends performance data about the whole instance to the master server.
# It can also be used to send messages to it or to wait for its ACK messages.
class senderThread(threading.Thread):
def __init__(self, connection,frequency):
threading.Thread.__init__(self)
self.c = connection
self.Terminated = False
self.frequency = frequency
self.nb_of_sent_msgs = 0
def run(self):
logApp("Sender thread started")
while not self.Terminated:
time.sleep(float(self.frequency))
#self.c.send( getPerfServer());
self.c.close()
def sendMsg(self,msg):
logApp("SenderThread TX :: %s" %msg)
self.nb_of_sent_msgs=self.nb_of_sent_msgs+1
self.c.sendall(msg)
def getAck(self):
logApp("getAck: waiting...")
rx = self.c.recv(1024)
        logApp("SenderThread RX :: %s" % rx)
return rx
def stop(self):
self.Terminated = True
logApp("SenderThread terminating #send msg %d" %self.nb_of_sent_msgs)
def getNbMsg(self):
return self.nb_of_sent_msgs
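A minimal wiring sketch for the thread above, assuming a plain TCP socket stands in for the master-server connection; the host, port, import path and 5-second frequency are illustrative.
# Usage sketch (hypothetical host/port; assumes the module is importable as
# 'senderThread' and that the master server answers with a short ACK string).
import socket

from senderThread import senderThread

conn = socket.create_connection(("master.example.org", 9000))
sender = senderThread(conn, frequency=5)
sender.start()

sender.sendMsg("HELLO")
ack = sender.getAck()      # blocks until the master replies
print("master replied: %s" % ack)

sender.stop()              # loop exits after the next sleep tick, then closes the socket
sender.join()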
| Northshoot/maestro | worker/senderThread.py | Python | apache-2.0 | 1,239 |
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552.
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PackageInfo(object):
def __init__(self, packageName, packageVersion='', installLocation='',
configFile='', initdScriptName='', repository=''):
self.packageName = packageName
self.packageVersion = packageVersion
self.installLocation = installLocation
self.configFile = configFile
self.initdScriptName = initdScriptName
self.repository = repository
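PackageInfo is a plain value object; a short instantiation sketch (the package name, version and paths are invented):
# Example value object; package details below are illustrative only.
httpd = PackageInfo('httpd',
                    packageVersion='2.2.15',
                    installLocation='/etc/httpd',
                    configFile='/etc/httpd/conf/httpd.conf',
                    initdScriptName='httpd')
print("%s %s" % (httpd.packageName, httpd.packageVersion))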
| StratusLab/client | api/code/src/main/python/stratuslab/system/PackageInfo.py | Python | apache-2.0 | 1,159 |
# -*- coding: utf-8 -*-
import re
import types
import logging
import subprocess
from ..exceptions import (ExecuteRuntimeError, ScriptRuntimeError,
NetworkError)
from .strings import mask_string
block_fmt = ("\n============= %(title)s ==========\n%(content)s\n"
"======== END OF %(title)s ========")
def execute(cmd, workdir=None, can_fail=True, mask_list=None,
use_shell=False, log=True):
"""
    Runs shell command cmd. If the command exits with a non-zero return
    code and can_fail is True, an ExecuteRuntimeError is raised. Otherwise
    the return code and stdout are returned to the caller.
"""
mask_list = mask_list or []
repl_list = [("'", "'\\''")]
if not isinstance(cmd, types.StringType):
import pipes
masked = ' '.join((pipes.quote(i) for i in cmd))
else:
masked = cmd
masked = mask_string(masked, mask_list, repl_list)
if log:
logging.info("Executing command:\n%s" % masked)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=workdir,
shell=use_shell, close_fds=True)
out, err = proc.communicate()
masked_out = mask_string(out, mask_list, repl_list)
masked_err = mask_string(err, mask_list, repl_list)
if log:
logging.debug(block_fmt % {'title': 'STDOUT', 'content': masked_out})
if proc.returncode:
if log:
logging.debug(block_fmt % {'title': 'STDERR',
'content': masked_err})
if can_fail:
msg = 'Failed to execute command: %s' % masked_out
raise ExecuteRuntimeError(msg, stdout=out, stderr=err)
return proc.returncode, out
class ScriptRunner(object):
_pkg_search = 'rpm -q --whatprovides'
def __init__(self, ip=None):
self.script = []
self.ip = ip
def append(self, s):
self.script.append(s)
def clear(self):
self.script = []
def execute(self, can_fail=True, mask_list=None, log=True):
mask_list = mask_list or []
repl_list = [("'", "'\\''")]
script = "\n".join(self.script)
masked = mask_string(script, mask_list, repl_list)
if log:
logging.info("[%s] Executing script:\n%s" %
(self.ip or 'localhost', masked))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if self.ip:
cmd = ["ssh", "-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"root@%s" % self.ip, "bash -x"]
else:
cmd = ["bash", "-x"]
obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
close_fds=True, shell=False)
script = "function t(){ exit $? ; } \n trap t ERR \n" + script
out, err = obj.communicate(script)
masked_out = mask_string(out, mask_list, repl_list)
masked_err = mask_string(err, mask_list, repl_list)
if log:
logging.debug(block_fmt % {'title': 'STDOUT',
'content': masked_out})
if obj.returncode:
if log:
logging.debug(block_fmt % {'title': 'STDERR',
'content': masked_err})
if can_fail:
pattern = (r'^ssh\:')
if re.search(pattern, err):
raise NetworkError(masked_err, stdout=out, stderr=err)
else:
msg = 'Failed to run remote script: %s' % masked_out
raise ScriptRuntimeError(msg, stdout=out, stderr=err)
return obj.returncode, out
def template(self, src, dst, varsdict):
with open(src) as fp:
content = fp.read() % varsdict
self.append("cat > %s <<- EOF\n%s\nEOF\n" % (dst, content))
def if_not_exists(self, path, command):
self.append("[ -e %s ] || %s" % (path, command))
def if_exists(self, path, command):
self.append("[ -e %s ] && %s" % (path, command))
def if_installed(self, pkg, command):
self.append("%s %s && %s" % (self._pkg_search, pkg, command))
def if_not_installed(self, pkg, command):
self.append("%s %s || %s" % (self._pkg_search, pkg, command))
def chown(self, target, uid, gid):
self.append("chown %s:%s %s" % (uid, gid, target))
def chmod(self, target, mode):
self.append("chmod %s %s" % (mode, target))
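A short sketch of how these helpers might be used together; the command, package name and host address are placeholders, and the import assumes packstack's package layout.
# Usage sketch (placeholder command/host; import path assumes packstack's layout).
from packstack.installer.utils.shell import execute, ScriptRunner

# Run a local command; values in mask_list are hidden from the logs.
rc, out = execute(['echo', 'secret-token'], mask_list=['secret-token'])

# Build a small script and run it over ssh as root on a remote host.
runner = ScriptRunner(ip='192.0.2.10')
runner.if_not_installed('openssh-clients', 'yum install -y openssh-clients')
runner.chmod('/var/log/demo.log', '0640')
rc, out = runner.execute(log=True)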
| harveyzh/packstack | packstack/installer/utils/shell.py | Python | apache-2.0 | 4,527 |
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import mock
from neutron_lib import exceptions as exception
from oslo_config import cfg
import six.moves.urllib.request as urlrequest
import testtools
import webob
import webob.exc
from neutron.common import exceptions as n_exc
from neutron.db import api
from neutron.tests import base
from neutron.tests.common import helpers
from neutron import wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'var'))
def open_no_proxy(*args, **kwargs):
# NOTE(jamespage):
# Deal with more secure certification chain verification
# introduced in python 2.7.9 under PEP-0476
# https://github.com/python/peps/blob/master/pep-0476.txt
if hasattr(ssl, "_create_unverified_context"):
opener = urlrequest.build_opener(
urlrequest.ProxyHandler({}),
urlrequest.HTTPSHandler(context=ssl._create_unverified_context())
)
else:
opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
return opener.open(*args, **kwargs)
class TestServiceBase(base.BaseTestCase):
"""Service tests base."""
@mock.patch("neutron.policy.refresh")
@mock.patch("neutron.common.config.setup_logging")
def _test_reset(self, worker_service, setup_logging_mock, refresh_mock):
worker_service.reset()
setup_logging_mock.assert_called_once_with()
refresh_mock.assert_called_once_with()
class TestWorkerService(TestServiceBase):
"""WorkerService tests."""
@mock.patch('neutron.db.api.get_engine')
def test_start_withoutdb_call(self, apimock):
# clear engine from other tests
api._FACADE = None
_service = mock.Mock()
_service.pool.spawn.return_value = None
_app = mock.Mock()
workerservice = wsgi.WorkerService(_service, _app)
workerservice.start()
self.assertFalse(apimock.called)
def test_reset(self):
_service = mock.Mock()
_app = mock.Mock()
worker_service = wsgi.WorkerService(_service, _app)
self._test_reset(worker_service)
class TestWSGIServer(base.BaseTestCase):
"""WSGI server tests."""
def test_start_random_port(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="127.0.0.1")
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@mock.patch('oslo_service.service.ProcessLauncher')
def test_start_multiple_workers(self, ProcessLauncher):
launcher = ProcessLauncher.return_value
server = wsgi.Server("test_multiple_processes")
server.start(None, 0, host="127.0.0.1", workers=2)
launcher.launch_service.assert_called_once_with(mock.ANY, workers=2)
server.stop()
launcher.stop.assert_called_once_with()
server.wait()
launcher.wait.assert_called_once_with()
def test_start_random_port_with_ipv6(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="::1")
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_ipv6_listen_called_with_scope(self):
server = wsgi.Server("test_app")
with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
mock_get_addr.return_value = [
(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'',
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
]
with mock.patch.object(server, 'pool') as mock_pool:
server.start(None,
1234,
host="fe80::204:acff:fe96:da87%eth0")
mock_get_addr.assert_called_once_with(
"fe80::204:acff:fe96:da87%eth0",
1234,
socket.AF_UNSPEC,
socket.SOCK_STREAM
)
mock_listen.assert_called_once_with(
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
family=socket.AF_INET6,
backlog=cfg.CONF.backlog
)
mock_pool.spawn.assert_has_calls([
mock.call(
server._run,
None,
mock_listen.return_value.dup.return_value)
])
def test_app(self):
greetings = b'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_disable_ssl(self):
CONF.set_default('use_ssl', True)
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app", disable_ssl=True)
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings.encode('utf-8'), response.read())
server.stop()
@mock.patch.object(wsgi, 'eventlet')
def test__run(self, eventlet_mock):
server = wsgi.Server('test')
server._run("app", "socket")
eventlet_mock.wsgi.server.assert_called_once_with(
'socket',
'app',
max_size=server.num_threads,
log=mock.ANY,
keepalive=CONF.wsgi_keep_alive,
socket_timeout=server.client_socket_timeout
)
class SerializerTest(base.BaseTestCase):
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
input_dict = {'servers': {'test': 'pass'}}
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType, serializer.serialize,
input_dict, content_type)
def test_get_deserialize_handler_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType,
serializer.get_deserialize_handler, content_type)
def test_serialize_content_type_json(self):
"""Test serialize with content type json."""
input_data = {'servers': ['test=pass']}
content_type = 'application/json'
serializer = wsgi.Serializer()
result = serializer.serialize(input_data, content_type)
self.assertEqual(b'{"servers": ["test=pass"]}', result)
def test_deserialize_raise_bad_request(self):
"""Test serialize verifies that exception is raises."""
content_type = 'application/unknown'
data_string = 'test'
serializer = wsgi.Serializer()
self.assertRaises(
webob.exc.HTTPBadRequest,
serializer.deserialize, data_string, content_type)
def test_deserialize_json_content_type(self):
"""Test Serializer.deserialize with content type json."""
content_type = 'application/json'
data_string = '{"servers": ["test=pass"]}'
serializer = wsgi.Serializer()
result = serializer.deserialize(data_string, content_type)
self.assertEqual({'body': {u'servers': [u'test=pass']}}, result)
class RequestDeserializerTest(testtools.TestCase):
def setUp(self):
super(RequestDeserializerTest, self).setUp()
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
self.body_deserializers = {'application/json': JSONDeserializer()}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def test_get_deserializer(self):
"""Test RequestDeserializer.get_body_deserializer."""
expected_json_serializer = self.deserializer.get_body_deserializer(
'application/json')
self.assertEqual(
expected_json_serializer,
self.body_deserializers['application/json'])
def test_get_expected_content_type(self):
"""Test RequestDeserializer.get_expected_content_type."""
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual('application/json',
self.deserializer.get_expected_content_type(request))
def test_get_action_args(self):
"""Test RequestDeserializer.get_action_args."""
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12}]}
expected = {'action': 'update', 'id': 12}
self.assertEqual(expected,
self.deserializer.get_action_args(env))
def test_deserialize(self):
"""Test RequestDeserializer.deserialize."""
with mock.patch.object(
self.deserializer, 'get_action_args') as mock_method:
mock_method.return_value = {'action': 'create'}
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/json')
self.assertEqual(expected, deserialized)
def test_get_body_deserializer_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
deserializer = wsgi.RequestDeserializer()
self.assertRaises(
exception.InvalidContentType,
deserializer.get_body_deserializer, content_type)
class ResponseSerializerTest(testtools.TestCase):
def setUp(self):
super(ResponseSerializerTest, self).setUp()
class JSONSerializer(object):
def serialize(self, data, action='default'):
return b'pew_json'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {'application/json': JSONSerializer()}
self.serializer = wsgi.ResponseSerializer(
self.body_serializers, HeadersSerializer())
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
def test_get_body_serializer(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.get_body_serializer, 'application/unknown')
def test_get_serializer(self):
"""Test ResponseSerializer.get_body_serializer."""
content_type = 'application/json'
self.assertEqual(self.body_serializers[content_type],
self.serializer.get_body_serializer(content_type))
def test_serialize_json_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'pew_json', response.body)
self.assertEqual(404, response.status_int)
def test_serialize_response_None(self):
response = self.serializer.serialize(
None, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'', response.body)
self.assertEqual(404, response.status_int)
class RequestTest(base.BaseTestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = b"fake<br />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/new-type;"
self.assertIsNone(request.get_content_type())
def test_content_type_from_accept(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/new_type"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
class ActionDispatcherTest(base.BaseTestCase):
def test_dispatch(self):
"""Test ActionDispatcher.dispatch."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x
self.assertEqual('pants',
serializer.dispatch('pants', action='create'))
def test_dispatch_action_None(self):
"""Test ActionDispatcher.dispatch with none action."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action=None))
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action='update'))
class ResponseHeadersSerializerTest(base.BaseTestCase):
def test_default(self):
serializer = wsgi.ResponseHeaderSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'fake')
self.assertEqual(200, response.status_int)
def test_custom(self):
class Serializer(wsgi.ResponseHeaderSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(404, response.status_int)
self.assertEqual('123', response.headers['X-Custom-Header'])
class DictSerializerTest(base.BaseTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual('',
serializer.serialize({}, 'NonExistentAction'))
class JSONDictSerializerTest(base.BaseTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = b'{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
# The tested behaviour is only meant to be witnessed in Python 2, so it is
# OK to skip this test with Python 3.
@helpers.requires_py2
def test_json_with_utf8(self):
input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c')))
expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
def test_json_with_unicode(self):
input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc')))
expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
class TextDeserializerTest(base.BaseTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual({},
deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(base.BaseTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1'}}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_default_raise_Malformed_Exception(self):
"""Test JsonDeserializer.default.
Test verifies JsonDeserializer.default raises exception
MalformedRequestBody correctly.
"""
data_string = ""
deserializer = wsgi.JSONDeserializer()
self.assertRaises(
n_exc.MalformedRequestBody, deserializer.default, data_string)
def test_json_with_utf8(self):
data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_json_with_unicode(self):
data = b'{"a": "\u7f51\u7edc"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
class RequestHeadersDeserializerTest(base.BaseTestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual({},
deserializer.deserialize(req, 'nonExistent'))
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual({'a': 'b'},
deserializer.deserialize(req, 'update'))
class ResourceTest(base.BaseTestCase):
@staticmethod
def my_fault_body_function():
return 'off'
class Controller(object):
def index(self, request, index=None):
return index
def test_dispatch(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
actual = resource.dispatch(
resource.controller, 'index', action_args={'index': 'off'})
expected = 'off'
self.assertEqual(expected, actual)
def test_dispatch_unknown_controller_action(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
self.assertRaises(
AttributeError, resource.dispatch,
resource.controller, 'create', {})
def test_malformed_request_body_throws_bad_request(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", body=b"{mal:formed", method='POST',
headers={'Content-Type': "application/json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_throws_unsupported_media_type_error(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", body=b"{some:json}", method='POST',
headers={'Content-Type': "xxx"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_server_error(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = resource(request)
self.assertEqual(500, response.status_int)
def test_call_resource_class_bad_request(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = 'body'
def method(self):
pass
def best_match_content_type(self):
return 'best_match_content_type'
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(400, result.status_int)
def test_type_error(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "json"})
response = resource.dispatch(
request, action='index', action_args='test')
self.assertEqual(400, response.status_int)
def test_call_resource_class_internal_error(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = '{"Content-Type": "json"}'
def method(self):
pass
def best_match_content_type(self):
return 'application/json'
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(500, result.status_int)
class FaultTest(base.BaseTestCase):
def test_call_fault(self):
class MyException(object):
status_int = 415
explanation = 'test'
my_exceptions = MyException()
my_fault = wsgi.Fault(exception=my_exceptions)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = my_fault(request)
self.assertEqual(415, response.status_int)
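Outside the test suite, the serializer classes exercised above give a simple JSON round trip; a sketch assuming neutron.wsgi is importable, with the expected values taken from the assertions in these tests.
# Round-trip sketch of the classes tested above (no HTTP server needed).
from neutron import wsgi

serializer = wsgi.Serializer()
body = serializer.serialize({'servers': ['test=pass']}, 'application/json')
# body == b'{"servers": ["test=pass"]}'
data = serializer.deserialize(body, 'application/json')
# data == {'body': {'servers': ['test=pass']}}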
| bigswitch/neutron | neutron/tests/unit/test_wsgi.py | Python | apache-2.0 | 25,931 |
#!/usr/bin/env python2.7
"""
Script to extract names and aliases from Freebase RDF dump.
"""
import gzip
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
log = logging.getLogger()
log.setLevel(logging.INFO) # DEBUG, INFO, WARN, ERROR, CRITICAL
def read_mids(f):
log.info('Reading mids..')
mids = frozenset([m.strip() for m in gzip.open(f, 'rb').readlines()])
log.info('..done (%d).' % len(mids))
return mids
def mid(s):
s = s.strip('<>')
if s.startswith('http://rdf.freebase.com/ns/m.'):
return s[27:]
def main(dumpf, midsf, namef, aliasf):
mid_set = read_mids(midsf)
name_fh = open(namef, 'w')
alias_fh = open(aliasf, 'w')
log.info('Scanning dump for names/aliases..')
for i, line in enumerate(gzip.open(dumpf, 'rb')):
if i % 1000000 == 0:
log.info('..%d..' % i)
fields = line.strip().split('\t')
        if len(fields) != 4:
            log.warn('Unexpected format: %s' % line)
            continue
s, p, o, t = fields
if mid(s) in mid_set:
p = p.strip('<>')
if p.endswith('type.object.name'):
name_fh.write(line)
elif p.endswith('common.topic.alias'):
alias_fh.write(line)
log.info('..done.')
if __name__ == '__main__':
import argparse
p = argparse.ArgumentParser(description='Extract names/aliases from FB')
p.add_argument('dump', help='Path to Freebase RDF dump file')
p.add_argument('mids', help='List of MIDs to extract')
p.add_argument('names', help='Output file for names')
p.add_argument('aliases', help='Output file for aliases')
args = p.parse_args()
main(args.dump, args.mids, args.names, args.aliases)
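The extraction can also be driven programmatically; a sketch with placeholder file names (both inputs must be gzip files, matching how read_mids() and main() open them, and the import assumes the module is importable as extract).
# Programmatic invocation sketch (file names are placeholders).
from extract import main

main('freebase-rdf-latest.gz',   # full RDF dump (gzip)
     'mids-of-interest.gz',      # one MID per line (gzip)
     'names.tsv',                # output: type.object.name triples
     'aliases.tsv')              # output: common.topic.alias triples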
| benhachey/fbutil | alias/extract.py | Python | apache-2.0 | 1,721 |
from upseto import gitwrapper
from upseto import run
import subprocess
class RootfsLabel:
def __init__(self, rootfs, product="rootfs"):
self._rootfs = rootfs
self._product = product
if rootfs == "THIS":
self._label = run.run([
"solvent", "printlabel", "--thisProject", "--product=%s" % (self._product,)]).strip()
wrapper = gitwrapper.GitWrapper(".")
self._hint = wrapper.originURLBasename()
elif self._labelExists(self._rootfs):
self._label = self._rootfs
self._hint = self._rootfs
elif "__" in self._rootfs:
repository, product = self._rootfs.split("__")
self._label = run.run([
"solvent", "printlabel", "--repositoryBasename", repository, "--product", product]).strip()
self._hint = repository
else:
self._label = run.run([
"solvent", "printlabel", "--repositoryBasename", rootfs,
"--product=%s" % (self._product,)]).strip()
self._hint = rootfs
def label(self):
return self._label
def imageHint(self):
return self._hint
def _labelExists(self, label):
with open("/dev/null", "w") as out:
return subprocess.call(["solvent", "labelexists", "--label", label], stdout=out, stderr=out) == 0
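A usage sketch, assuming the solvent CLI is installed and the import path matches the repository layout; the rootfs name is illustrative.
# Usage sketch (requires the 'solvent' CLI; the rootfs name is made up).
from strato.racktest.infra.rootfslabel import RootfsLabel

rootfs = RootfsLabel("rootfs-centos7")   # resolved via 'solvent printlabel'
print("label: %s (hint: %s)" % (rootfs.label(), rootfs.imageHint()))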
| Stratoscale/pyracktest | py/strato/racktest/infra/rootfslabel.py | Python | apache-2.0 | 1,376 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __builtin__
import datetime
import hashlib
import os
import os.path
import StringIO
import tempfile
import mox
import nova
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova import utils
CONF = cfg.CONF
CONF.import_opt('glance_host', 'nova.config')
CONF.import_opt('glance_port', 'nova.config')
CONF.import_opt('glance_protocol', 'nova.config')
class ByteConversionTest(test.TestCase):
def test_string_conversions(self):
working_examples = {
'1024KB': 1048576,
'1024TB': 1125899906842624,
'1024K': 1048576,
'1024T': 1125899906842624,
'1TB': 1099511627776,
'1T': 1099511627776,
'1KB': 1024,
'1K': 1024,
            '1B': 1,
'1': 1,
'1MB': 1048576,
'7MB': 7340032,
'0MB': 0,
'0KB': 0,
'0TB': 0,
'': 0,
}
for (in_value, expected_value) in working_examples.items():
b_value = utils.to_bytes(in_value)
self.assertEquals(expected_value, b_value)
if len(in_value):
in_value = "-" + in_value
b_value = utils.to_bytes(in_value)
self.assertEquals(expected_value * -1, b_value)
breaking_examples = [
'junk1KB', '1023BBBB',
]
for v in breaking_examples:
self.assertRaises(TypeError, utils.to_bytes, v)
class ExecuteTestCase(test.TestCase):
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0755)
self.assertRaises(exception.ProcessExecutionError,
utils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input='foo',
delay_on_retry=False)
fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEquals(runs.strip(), 'failure', 'stdin did not '
'always get passed '
'correctly')
runs = int(runs.strip())
self.assertEquals(runs, 10,
'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_unknown_kwargs_raises_error(self):
self.assertRaises(exception.NovaException,
utils.execute,
'/usr/bin/env', 'true',
this_is_not_a_valid_kwarg=True)
def test_check_exit_code_boolean(self):
utils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(exception.ProcessExecutionError,
utils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
fp.close()
os.chmod(tmpfilename, 0755)
utils.execute(tmpfilename,
tmpfilename2,
process_input='foo',
attempts=2)
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
input = []
self.assertEquals([], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [None]
self.assertEquals([], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': None}]
self.assertEquals([], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': None}}]
self.assertEquals([{'b': None}], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}]
self.assertEquals([{'b': {'c': None}}], f(input, "a"))
self.assertEquals([{'c': None}], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': None}]
self.assertEquals([{'b': {'c': None}}], f(input, "a"))
self.assertEquals([{'c': None}], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a"))
self.assertEquals([{'c': None}], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
def test_does_select(self):
f = utils.get_from_path
input = [{'a': 'a_1'}]
self.assertEquals(['a_1'], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': 'b_1'}}]
self.assertEquals([{'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}]
self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
self.assertEquals(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
self.assertEquals(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': None}}]
self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
self.assertEquals(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': {'c': 'c_2'}}}]
self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
f(input, "a"))
self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c"))
self.assertEquals([], f(input, "a/b/c/d"))
self.assertEquals([], f(input, "c/a/b/d"))
self.assertEquals([], f(input, "i/r/t"))
def test_flattens_lists(self):
f = utils.get_from_path
input = [{'a': [1, 2, 3]}]
self.assertEquals([1, 2, 3], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}]
self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEquals([1, 2, 3], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = [{'a': [1, 2, {'b': 'b_1'}]}]
self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
def test_bad_xpath(self):
f = utils.get_from_path
self.assertRaises(exception.NovaException, f, [], None)
self.assertRaises(exception.NovaException, f, [], "")
self.assertRaises(exception.NovaException, f, [], "/")
self.assertRaises(exception.NovaException, f, [], "/a")
self.assertRaises(exception.NovaException, f, [], "/a/")
self.assertRaises(exception.NovaException, f, [], "//")
self.assertRaises(exception.NovaException, f, [], "//a")
self.assertRaises(exception.NovaException, f, [], "a//a")
self.assertRaises(exception.NovaException, f, [], "a//a/")
self.assertRaises(exception.NovaException, f, [], "a/a/")
def test_real_failure1(self):
# Real world failure case...
# We weren't coping when the input was a Dictionary instead of a List
# This led to test_accepts_dictionaries
f = utils.get_from_path
inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
'address': '192.168.0.3'},
'hostname': ''}
private_ips = f(inst, 'fixed_ip/address')
public_ips = f(inst, 'fixed_ip/floating_ips/address')
self.assertEquals(['192.168.0.3'], private_ips)
self.assertEquals(['1.2.3.4'], public_ips)
def test_accepts_dictionaries(self):
f = utils.get_from_path
input = {'a': [1, 2, 3]}
self.assertEquals([1, 2, 3], f(input, "a"))
self.assertEquals([], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = {'a': {'b': [1, 2, 3]}}
self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEquals([1, 2, 3], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEquals([], f(input, "a/b/c"))
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
class GenericUtilsTestCase(test.TestCase):
def test_parse_server_string(self):
result = utils.parse_server_string('::1')
self.assertEqual(('::1', ''), result)
result = utils.parse_server_string('[::1]:8773')
self.assertEqual(('::1', '8773'), result)
result = utils.parse_server_string('2001:db8::192.168.1.1')
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
result = utils.parse_server_string('192.168.1.1')
self.assertEqual(('192.168.1.1', ''), result)
result = utils.parse_server_string('192.168.1.2:8773')
self.assertEqual(('192.168.1.2', '8773'), result)
result = utils.parse_server_string('192.168.1.3')
self.assertEqual(('192.168.1.3', ''), result)
result = utils.parse_server_string('www.example.com:8443')
self.assertEqual(('www.example.com', '8443'), result)
result = utils.parse_server_string('www.example.com')
self.assertEqual(('www.example.com', ''), result)
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_bool_from_str(self):
self.assertTrue(utils.bool_from_str('1'))
self.assertTrue(utils.bool_from_str('2'))
self.assertTrue(utils.bool_from_str('-1'))
self.assertTrue(utils.bool_from_str('true'))
self.assertTrue(utils.bool_from_str('True'))
self.assertTrue(utils.bool_from_str('tRuE'))
self.assertTrue(utils.bool_from_str('yes'))
self.assertTrue(utils.bool_from_str('Yes'))
self.assertTrue(utils.bool_from_str('YeS'))
self.assertTrue(utils.bool_from_str('y'))
self.assertTrue(utils.bool_from_str('Y'))
self.assertFalse(utils.bool_from_str('False'))
self.assertFalse(utils.bool_from_str('false'))
self.assertFalse(utils.bool_from_str('no'))
self.assertFalse(utils.bool_from_str('No'))
self.assertFalse(utils.bool_from_str('n'))
self.assertFalse(utils.bool_from_str('N'))
self.assertFalse(utils.bool_from_str('0'))
self.assertFalse(utils.bool_from_str(None))
self.assertFalse(utils.bool_from_str('junk'))
def test_generate_glance_http_url(self):
generated_url = utils.generate_glance_url()
http_url = "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(glance_protocol="https")
generated_url = utils.generate_glance_url()
https_url = "https://%s:%d" % (CONF.glance_host, CONF.glance_port)
self.assertEqual(generated_url, https_url)
def test_read_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
self.mox.ReplayAll()
cache_data = {"data": 1123, "mtime": 1}
data = utils.read_cached_file("/this/is/a/fake", cache_data)
self.assertEqual(cache_data["data"], data)
def test_read_modified_cached_file(self):
self.mox.StubOutWithMock(os.path, "getmtime")
self.mox.StubOutWithMock(__builtin__, 'open')
os.path.getmtime(mox.IgnoreArg()).AndReturn(2)
fake_contents = "lorem ipsum"
fake_file = self.mox.CreateMockAnything()
fake_file.read().AndReturn(fake_contents)
fake_context_manager = self.mox.CreateMockAnything()
fake_context_manager.__enter__().AndReturn(fake_file)
fake_context_manager.__exit__(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
__builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)
self.mox.ReplayAll()
cache_data = {"data": 1123, "mtime": 1}
self.reload_called = False
def test_reload(reloaded_data):
self.assertEqual(reloaded_data, fake_contents)
self.reload_called = True
data = utils.read_cached_file("/this/is/a/fake", cache_data,
reload_func=test_reload)
self.assertEqual(data, fake_contents)
self.assertTrue(self.reload_called)
def test_generate_password(self):
password = utils.generate_password()
self.assertTrue([c for c in password if c in '0123456789'])
self.assertTrue([c for c in password
if c in 'abcdefghijklmnopqrstuvwxyz'])
self.assertTrue([c for c in password
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
def test_read_file_as_root(self):
def fake_execute(*args, **kwargs):
if args[1] == 'bad':
raise exception.ProcessExecutionError
return 'fakecontents', None
self.stubs.Set(utils, 'execute', fake_execute)
contents = utils.read_file_as_root('good')
self.assertEqual(contents, 'fakecontents')
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root, 'bad')
def test_strcmp_const_time(self):
self.assertTrue(utils.strcmp_const_time('abc123', 'abc123'))
self.assertFalse(utils.strcmp_const_time('a', 'aaaaa'))
self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123'))
def test_temporary_chown(self):
def fake_execute(*args, **kwargs):
if args[0] == 'chown':
fake_execute.uid = args[1]
self.stubs.Set(utils, 'execute', fake_execute)
with tempfile.NamedTemporaryFile() as f:
with utils.temporary_chown(f.name, owner_uid=2):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
def test_xhtml_escape(self):
        self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
        self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
        self.assertEqual('&amp;', utils.xhtml_escape('&'))
        self.assertEqual('&gt;', utils.xhtml_escape('>'))
        self.assertEqual('&lt;', utils.xhtml_escape('<'))
        self.assertEqual('&lt;foo&gt;', utils.xhtml_escape('<foo>'))
def test_hash_file(self):
data = 'Mary had a little lamb, its fleece as white as snow'
flo = StringIO.StringIO(data)
h1 = utils.hash_file(flo)
h2 = hashlib.sha1(data).hexdigest()
self.assertEquals(h1, h2)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'nova.tests.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package + 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
nova.tests.monkey_patch_example.CALLED_FUNCTION = []
from nova.tests.monkey_patch_example import example_a
from nova.tests.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(package_a + 'example_function_a'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
#a fairly random time to test with
self.test_time = datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)
timeutils.set_time_override(override_time=self.test_time)
def tearDown(self):
timeutils.clear_time_override()
super(AuditPeriodTest, self).tearDown()
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEquals(begin, datetime.datetime(
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEquals(begin, datetime.datetime(
minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEquals(begin, datetime.datetime(
minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEquals(begin, datetime.datetime(
day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEquals(begin, datetime.datetime(
hour=6,
day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEquals(begin, datetime.datetime(
hour=10,
day=3,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEquals(begin, datetime.datetime(
day=1,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(
day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEquals(begin, datetime.datetime(
day=2,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(
day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEquals(begin, datetime.datetime(
day=15,
month=1,
year=2012))
self.assertEquals(end, datetime.datetime(
day=15,
month=2,
year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEquals(begin, datetime.datetime(
day=1,
month=1,
year=2011))
self.assertEquals(end, datetime.datetime(
day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEquals(begin, datetime.datetime(
day=1,
month=2,
year=2011))
self.assertEquals(end, datetime.datetime(
day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEquals(begin, datetime.datetime(
day=1,
month=6,
year=2010))
self.assertEquals(end, datetime.datetime(
day=1,
month=6,
year=2011))
class DiffDict(test.TestCase):
"""Unit tests for diff_dict()"""
def test_no_change(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=2, c=3)
diff = utils.diff_dict(old, new)
self.assertEqual(diff, {})
def test_new_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=2, c=3, d=4)
diff = utils.diff_dict(old, new)
self.assertEqual(diff, dict(d=['+', 4]))
def test_changed_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=4, c=3)
diff = utils.diff_dict(old, new)
self.assertEqual(diff, dict(b=['+', 4]))
def test_removed_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, c=3)
diff = utils.diff_dict(old, new)
self.assertEqual(diff, dict(b=['-']))
class MkfsTestCase(test.TestCase):
def test_mkfs(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev')
utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev')
utils.execute('mkswap', '/my/swap/block/dev')
self.mox.ReplayAll()
utils.mkfs('ext4', '/my/block/dev')
utils.mkfs('msdos', '/my/msdos/block/dev')
utils.mkfs('swap', '/my/swap/block/dev')
def test_mkfs_with_label(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('mkfs', '-t', 'ext4', '-F',
'-L', 'ext4-vol', '/my/block/dev')
utils.execute('mkfs', '-t', 'msdos',
'-n', 'msdos-vol', '/my/msdos/block/dev')
utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev')
self.mox.ReplayAll()
utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
class LastBytesTestCase(test.TestCase):
"""Test the last_bytes() utility method."""
def setUp(self):
super(LastBytesTestCase, self).setUp()
self.f = StringIO.StringIO('1234567890')
def test_truncated(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = utils.last_bytes(self.f, 5)
self.assertEqual(out, '67890')
self.assertTrue(remaining > 0)
def test_read_all(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = utils.last_bytes(self.f, 1000)
self.assertEqual(out, '1234567890')
self.assertFalse(remaining > 0)
def test_seek_too_far_real_file(self):
        # StringIO doesn't raise IOError if you seek past the start of the file.
flo = tempfile.TemporaryFile()
content = '1234567890'
flo.write(content)
self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
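The helpers exercised above are ordinary nova.utils functions; a small sketch of the byte-parsing, boolean-parsing and audit-period helpers, with the expected values mirroring the assertions in these tests.
# Sketch of the tested helpers (expected values mirror the tests above).
from nova import utils

assert utils.to_bytes('1MB') == 1048576
assert utils.bool_from_str('yes')
assert not utils.bool_from_str('junk')

# Last completed hourly audit window relative to "now".
begin, end = utils.last_completed_audit_period(unit='hour')
print("audit window: %s -> %s" % (begin, end))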
| houshengbo/nova_vmware_compute_driver | nova/tests/test_utils.py | Python | apache-2.0 | 31,310 |
"""Base class for IKEA TRADFRI."""
from functools import wraps
import logging
from pytradfri.error import PytradfriError
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def handle_error(func):
"""Handle tradfri api call error."""
@wraps(func)
async def wrapper(command):
"""Decorate api call."""
try:
await func(command)
except PytradfriError as err:
_LOGGER.error("Unable to execute command %s: %s", command, err)
return wrapper
class TradfriBaseClass(Entity):
"""Base class for IKEA TRADFRI.
All devices and groups should ultimately inherit from this class.
"""
_attr_should_poll = False
def __init__(self, device, api, gateway_id):
"""Initialize a device."""
self._api = handle_error(api)
self._device = None
self._device_control = None
self._device_data = None
self._gateway_id = gateway_id
self._refresh(device)
@callback
def _async_start_observe(self, exc=None):
"""Start observation of device."""
if exc:
self.async_write_ha_state()
_LOGGER.warning("Observation failed for %s", self._attr_name, exc_info=exc)
try:
cmd = self._device.observe(
callback=self._observe_update,
err_callback=self._async_start_observe,
duration=0,
)
self.hass.async_create_task(self._api(cmd))
except PytradfriError as err:
_LOGGER.warning("Observation failed, trying again", exc_info=err)
self._async_start_observe()
async def async_added_to_hass(self):
"""Start thread when added to hass."""
self._async_start_observe()
@callback
def _observe_update(self, device):
"""Receive new state data for this device."""
self._refresh(device)
self.async_write_ha_state()
def _refresh(self, device):
"""Refresh the device data."""
self._device = device
self._attr_name = device.name
class TradfriBaseDevice(TradfriBaseClass):
"""Base class for a TRADFRI device.
All devices should inherit from this class.
"""
@property
def device_info(self):
"""Return the device info."""
info = self._device.device_info
return {
"identifiers": {(DOMAIN, self._device.id)},
"manufacturer": info.manufacturer,
"model": info.model_number,
"name": self._attr_name,
"sw_version": info.firmware_version,
"via_device": (DOMAIN, self._gateway_id),
}
def _refresh(self, device):
"""Refresh the device data."""
super()._refresh(device)
self._attr_available = device.reachable
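Concrete platforms build on these bases by subclassing; a minimal, hypothetical subclass sketch (the class and its unique_id scheme are invented and not part of the integration).
# Hypothetical device entity relying entirely on the observe/refresh plumbing
# of the base classes above (class name and unique_id scheme are invented).
class TradfriExampleDevice(TradfriBaseDevice):
    """Illustrative device entity built on TradfriBaseDevice."""

    def __init__(self, device, api, gateway_id):
        super().__init__(device, api, gateway_id)
        self._attr_unique_id = f"tradfri-example-{gateway_id}-{device.id}"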
| Danielhiversen/home-assistant | homeassistant/components/tradfri/base_class.py | Python | apache-2.0 | 2,898 |
# -*- coding: utf-8 -*-
'''
Salt module to manage unix mounts and the fstab file
'''
# Import python libs
import os
import re
import logging
# Import salt libs
import salt.utils
from salt._compat import string_types
from salt.utils import which as _which
from salt.exceptions import CommandNotFoundError, CommandExecutionError
# Set up logger
log = logging.getLogger(__name__)
def _active_mountinfo(ret):
filename = '/proc/self/mountinfo'
if not os.access(filename, os.R_OK):
msg = 'File not readable {0}'
raise CommandExecutionError(msg.format(filename))
with salt.utils.fopen(filename) as ifile:
for line in ifile:
comps = line.split()
device = comps[2].split(':')
ret[comps[4]] = {'mountid': comps[0],
'parentid': comps[1],
'major': device[0],
'minor': device[1],
'root': comps[3],
'opts': comps[5].split(','),
'fstype': comps[7],
'device': comps[8],
'superopts': comps[9].split(',')}
return ret
def _active_mounts(ret):
filename = '/proc/self/mounts'
if not os.access(filename, os.R_OK):
msg = 'File not readable {0}'
raise CommandExecutionError(msg.format(filename))
with salt.utils.fopen(filename) as ifile:
for line in ifile:
comps = line.split()
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': comps[3].split(',')}
return ret
def _active_mounts_freebsd(ret):
for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': comps[3].split(',')}
return ret
def active():
'''
List the active mounts.
CLI Example:
.. code-block:: bash
salt '*' mount.active
'''
ret = {}
    if __grains__['os'] in ('FreeBSD',):
_active_mounts_freebsd(ret)
else:
try:
_active_mountinfo(ret)
except CommandExecutionError:
_active_mounts(ret)
return ret
def fstab(config='/etc/fstab'):
'''
List the contents of the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.fstab
'''
ret = {}
if not os.path.isfile(config):
return ret
with salt.utils.fopen(config) as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
continue
if not line.strip():
# Blank line
continue
comps = line.split()
if len(comps) != 6:
# Invalid entry
continue
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': comps[3].split(','),
'dump': comps[4],
'pass': comps[5]}
return ret
def rm_fstab(name, config='/etc/fstab'):
'''
Remove the mount point from the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.rm_fstab /mnt/foo
'''
contents = fstab(config)
if name not in contents:
return True
# The entry is present, get rid of it
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 6:
# Invalid entry
lines.append(line)
continue
if comps[1] == name:
continue
lines.append(line)
except (IOError, OSError) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (IOError, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
return True
def set_fstab(
name,
device,
fstype,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
test=False,
**kwargs):
'''
Verify that this mount is represented in the fstab, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
lines = []
change = False
present = False
if not os.path.isfile(config):
raise CommandExecutionError('Bad config file "{0}"'.format(config))
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 6:
# Invalid entry
lines.append(line)
continue
if comps[1] == name:
# check to see if there are changes
# and fix them if there are any
present = True
if comps[0] != device:
change = True
comps[0] = device
if comps[2] != fstype:
change = True
comps[2] = fstype
if comps[3] != opts:
change = True
comps[3] = opts
if comps[4] != str(dump):
change = True
comps[4] = str(dump)
if comps[5] != str(pass_num):
change = True
comps[5] = str(pass_num)
if change:
log.debug(
'fstab entry for mount point {0} needs to be '
'updated'.format(name)
)
newline = (
'{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
device, name, fstype, opts, dump, pass_num
)
)
lines.append(newline)
else:
lines.append(line)
except (IOError, OSError) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
if change:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return 'change'
if not change:
if present:
# The right entry is already here
return 'present'
else:
if not salt.utils.test_mode(test=test, **kwargs):
# The entry is new, add it to the end of the fstab
newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
device,
name,
fstype,
opts,
dump,
pass_num)
lines.append(newline)
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
raise CommandExecutionError(
'File not writable {0}'.format(
config
)
)
return 'new'
def mount(name, device, mkmnt=False, fstype='', opts='defaults'):
'''
Mount a device
CLI Example:
.. code-block:: bash
salt '*' mount.mount /mnt/foo /dev/sdz1 True
'''
if isinstance(opts, string_types):
opts = opts.split(',')
if not os.path.exists(name) and mkmnt:
os.makedirs(name)
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
args += ' -t {0}'.format(fstype)
cmd = 'mount {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd)
if out['retcode']:
return out['stderr']
return True
def remount(name, device, mkmnt=False, fstype='', opts='defaults'):
'''
Attempt to remount a device, if the device is not already mounted, mount
is called
CLI Example:
.. code-block:: bash
salt '*' mount.remount /mnt/foo /dev/sdz1 True
'''
if isinstance(opts, string_types):
opts = opts.split(',')
mnts = active()
if name in mnts:
# The mount point is mounted, attempt to remount it with the given data
if 'remount' not in opts:
opts.append('remount')
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
args += ' -t {0}'.format(fstype)
cmd = 'mount {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd)
if out['retcode']:
return out['stderr']
return True
# Mount a filesystem that isn't already
return mount(name, device, mkmnt, fstype, opts)
def umount(name):
'''
Attempt to unmount a device by specifying the directory it is mounted on
CLI Example:
.. code-block:: bash
salt '*' mount.umount /mnt/foo
'''
mnts = active()
if name not in mnts:
return "{0} does not have anything mounted".format(name)
cmd = 'umount {0}'.format(name)
out = __salt__['cmd.run_all'](cmd)
if out['retcode']:
return out['stderr']
return True
def is_fuse_exec(cmd):
'''
Returns true if the command passed is a fuse mountable application.
CLI Example:
.. code-block:: bash
salt '*' mount.is_fuse_exec sshfs
'''
cmd_path = _which(cmd)
# No point in running ldd on a command that doesn't exist
if not cmd_path:
return False
elif not _which('ldd'):
raise CommandNotFoundError('ldd')
out = __salt__['cmd.run']('ldd {0}'.format(cmd_path))
return 'libfuse' in out
def swaps():
'''
Return a dict containing information on active swap
CLI Example:
.. code-block:: bash
salt '*' mount.swaps
'''
ret = {}
with salt.utils.fopen('/proc/swaps') as fp_:
for line in fp_:
if line.startswith('Filename'):
continue
comps = line.split()
ret[comps[0]] = {
'type': comps[1],
'size': comps[2],
'used': comps[3],
'priority': comps[4]}
return ret
def swapon(name, priority=None):
'''
Activate a swap disk
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile
'''
ret = {}
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = False
return ret
cmd = 'swapon {0}'.format(name)
if priority:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd)
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = True
return ret
return ret
def swapoff(name):
'''
Deactivate a named swap mount
CLI Example:
.. code-block:: bash
salt '*' mount.swapoff /root/swapfile
'''
on_ = swaps()
if name in on_:
__salt__['cmd.run']('swapoff {0}'.format(name))
on_ = swaps()
if name in on_:
return False
return True
return None
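# A minimal, standalone sketch of the fstab-line parsing that fstab() above
# performs, applied to an in-memory sample instead of /etc/fstab so it needs
# neither salt nor root access. The function name and the sample entries are
# made up for illustration.
def _parse_fstab_lines_sketch(lines):
    '''
    Return a dict keyed by mount point, shaped like the output of fstab().
    '''
    ret = {}
    for line in lines:
        if line.startswith('#') or not line.strip():
            # Skip comments and blank lines, as fstab() does
            continue
        comps = line.split()
        if len(comps) != 6:
            # Skip invalid entries
            continue
        ret[comps[1]] = {'device': comps[0],
                         'fstype': comps[2],
                         'opts': comps[3].split(','),
                         'dump': comps[4],
                         'pass': comps[5]}
    return ret


if __name__ == '__main__':
    sample = [
        '# static file system information',
        '/dev/sda1  /      ext4  defaults       0 1',
        '/dev/sdb1  /data  xfs   noatime,nodev  0 2',
    ]
    print(_parse_fstab_lines_sketch(sample))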
|
victorywang80/Maintenance
|
saltstack/src/salt/modules/mount.py
|
Python
|
apache-2.0
| 12,839
|
'''
Created on Sep 3, 2010
@author: Wilder Rodrigues (wilder.rodrigues@ekholabs.nl)
'''
'''
This file comprises a few functions used to manipulate lists, queues and stacks. It shows
the behavior of those data structures.
'''
'''
Imports the deque object from the collections module.
'''
from collections import deque
'''
This function only shows how to use a few methods of a list
(e.g. append, index, insert, reverse, sort, etc.). It is useful for getting
familiar with what a list can do.
'''
def using_list(x, list):
print 'list size', len(list)
'''
Adds the given object to the end of the list.
'''
list.append(x)
print list.index(x)
print 'list size', len(list)
'''
    Adds the given object (in this case, 'x' concatenated with itself)
    at the given position (index) in the list.
'''
list.insert(0, x + x)
print list
list.reverse()
print list
'''
Sorts the list elements.
'''
list.sort()
print list
'''
    Removes and returns the last element of the list. You can also
    pass an index to this function,
    e.g. list.pop(0) will remove and return the first element.
'''
print list.pop()
print list
'''
This function shows how to use a list as a stack (LIFO).
'''
def lifo(x, stack):
print stack
'''
Adds the given object to the stack (goes to the top of it)
'''
stack.append(x)
print stack
'''
Adds more objects to the top of the stack.
'''
stack.append(x + 2)
stack.append(x * 2)
print stack
'''
Adds more objects to the top of the stack.
'''
for i in range(50, 100, 30):
stack.append(i)
print stack
'''
    This loop iterates over the stack and removes its elements, from top to bottom.
    The pop() function, as mentioned before, removes and returns the last (top)
    element of the stack.
'''
for i in range(0, len(stack)):
stack.pop()
print stack
'''
This function uses a list to implement a queue (FIFO)
'''
def fifo(x, q):
'''
Creates a queue using the given list.
'''
queue = deque(q)
print queue
'''
    The object given as a parameter joins the queue. It goes to the end
    of the queue.
'''
queue.append(x)
print queue
'''
    Grandmother is 60 years old. She has priority and goes to the front
    of the queue. The queue.appendleft() function is used for this.
'''
queue.appendleft('Grandmother')
'''
Goes to the end of the queue.
'''
queue.append('Robert')
print queue
'''
This while loop iterates over the queue and removes its elements.
    The popleft() function removes the element at the front of the queue.
'''
while len(queue) != 0:
print queue.popleft()
print queue
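'''
Illustrative driver, not part of the original module: it simply exercises the
three functions above with small sample values. Like the rest of this file it
uses Python 2 syntax.
'''
if __name__ == '__main__':
    using_list('a', ['b', 'c', 'd'])
    lifo(10, [1, 2, 3])
    fifo('John', ['Alice', 'Bob'])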
|
ekholabs/python
|
src/nl/ekholabs/sample/python/structure/lists.py
|
Python
|
apache-2.0
| 3,034
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudevents.http import CloudEvent
import main
def test_functions_log_cloudevent_should_print_message(capsys):
attributes = {
"source": "test",
"type": "google.cloud.audit.log.v1.written",
"subject": "storage.googleapis.com/projects/_/buckets/my-bucket/objects/test.txt",
}
data = {
"protoPayload": {
"methodName": "storage.objects.create",
"authenticationInfo": {"principalEmail": "nobody@example.com"},
"resourceName": "some-resource",
}
}
event = CloudEvent(attributes, data)
main.hello_auditlog(event)
out, _ = capsys.readouterr()
assert "Event type: google.cloud.audit.log.v1.written" in out
assert (
"Subject: storage.googleapis.com/projects/_/buckets/my-bucket/objects/test.txt"
in out
)
assert "API method: storage.objects.create" in out
assert "Resource name: some-resource" in out
assert "Principal: nobody@example.com" in out
|
GoogleCloudPlatform/python-docs-samples
|
functions/v2/audit_log/main_test.py
|
Python
|
apache-2.0
| 1,569
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import subprocess
import uuid
from google.cloud import aiplatform
from google.cloud import bigquery
from google.cloud import storage
import pytest
import deploy_model
import predict
SUFFIX = uuid.uuid4().hex[0:6]
PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
BUCKET_NAME = f"wildlife-insights-{SUFFIX}"
BIGQUERY_DATASET = f"wildlife_insights_{SUFFIX}"
BIGQUERY_TABLE = "images_database"
MODEL_ENDPOINT = f"wildlife_insights_{SUFFIX}"
REGION = "us-central1"
MIN_IMAGES_PER_CLASS = 1
MAX_IMAGES_PER_CLASS = 1
# Use a pre-trained pre-existing model, training one takes too long.
MODEL_PATH = f"projects/{PROJECT}/locations/{REGION}/models/8785722428534816768"
@pytest.fixture(scope="session")
def bucket_name() -> str:
storage_client = storage.Client()
bucket = storage_client.create_bucket(BUCKET_NAME)
print(f"bucket_name: {repr(BUCKET_NAME)}")
yield BUCKET_NAME
bucket.delete(force=True)
@pytest.fixture(scope="session")
def bigquery_dataset() -> str:
bigquery_client = bigquery.Client()
dataset_id = f"{PROJECT}.{BIGQUERY_DATASET}"
bigquery_client.create_dataset(bigquery.Dataset(dataset_id))
print(f"bigquery_dataset: {repr(BIGQUERY_DATASET)}")
yield BIGQUERY_DATASET
bigquery_client.delete_dataset(dataset_id, delete_contents=True, not_found_ok=True)
@pytest.fixture(scope="session")
def bigquery_table(bigquery_dataset: str) -> str:
# Create a small test table.
table_id = f"{PROJECT}.{bigquery_dataset}.{BIGQUERY_TABLE}"
schema = [
bigquery.SchemaField("category", "STRING"),
bigquery.SchemaField("file_name", "STRING"),
]
rows = [
"alectoris rufa,animals/0059/1810.jpg",
"equus quagga,animals/0378/0118.jpg",
"fossa fossana,animals/0620/0242.jpg",
"human,humans/0379/0877.jpg",
"human,humans/0640/0467.jpg",
"lophotibis cristataa,animals/0605/1478.jpg",
"mazama temama,animals/0532/0525.jpg",
"odontophorus balliviani,animals/0523/1368.jpg",
"tayassu pecari,animals/0049/0849.jpg",
"tayassu pecari,animals/0090/1218.jpg",
]
bigquery_client = bigquery.Client()
with io.StringIO("\n".join(rows)) as source_file:
bigquery_client.load_table_from_file(
source_file,
table_id,
job_config=bigquery.LoadJobConfig(
source_format=bigquery.SourceFormat.CSV,
schema=schema,
),
).result()
# The table is deleted when we delete the dataset.
table = bigquery_client.get_table(table_id)
print(f"bigquery_table: {repr(BIGQUERY_TABLE)}")
print(f" table_id: {repr(table_id)}")
print(f" num_rows: {repr(table.num_rows)}")
print(f" schema: {repr(table.schema)}")
yield BIGQUERY_TABLE
@pytest.fixture(scope="session")
def model_endpoint_id() -> str:
print(f"model_path: {repr(MODEL_PATH)}")
endpoint_id = deploy_model.create_model_endpoint(PROJECT, REGION, MODEL_ENDPOINT)
deployed_model_id = deploy_model.deploy_model(
PROJECT, REGION, MODEL_PATH, MODEL_ENDPOINT, endpoint_id
)
print(f"model_endpoint_id: {repr(endpoint_id)}")
yield endpoint_id
client = aiplatform.gapic.EndpointServiceClient(
client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
endpoint_path = client.endpoint_path(PROJECT, REGION, endpoint_id)
client.undeploy_model(
endpoint=endpoint_path, deployed_model_id=deployed_model_id
).result()
client.delete_endpoint(name=endpoint_path).result()
@pytest.fixture(scope="session")
def cache_dependencies() -> None:
# The Dataflow staging procedure involves downloading all the requirements and
# rebuilding everything from scratch.
# Recent Apache Beam versions include dependencies that require a C++ and Rust compiler
# and compiling all the dependencies can take a long time.
# We download the pre-compiled dependencies and then set PIP_NO_DEPS to force
# pip to not rebuild any indirect dependencies.
subprocess.run(
[
"pip",
"download",
"--dest",
"/tmp/dataflow-requirements-cache",
"-r",
"requirements.txt",
],
check=True,
)
os.environ["PIP_NO_DEPS"] = "True"
def test_create_images_database_table(
bucket_name: str, bigquery_dataset: str, cache_dependencies: None
) -> None:
# The table is deleted when we delete the dataset.
subprocess.run(
[
"python",
"create_images_metadata_table.py",
f"--bigquery-dataset={bigquery_dataset}",
f"--bigquery-table={BIGQUERY_TABLE}_test",
"--runner=DataflowRunner",
f"--job_name=wildlife-images-database-{SUFFIX}",
f"--project={PROJECT}",
f"--temp_location=gs://{bucket_name}/temp",
f"--region={REGION}",
"--worker_machine_type=n1-standard-2",
],
check=True,
)
def test_train_model(
bucket_name: str,
bigquery_dataset: str,
bigquery_table: str,
cache_dependencies: None,
) -> None:
subprocess.run(
[
"python",
"train_model.py",
f"--cloud-storage-path=gs://{bucket_name}",
f"--bigquery-dataset={bigquery_dataset}",
f"--bigquery-table={bigquery_table}",
"--ai-platform-name-prefix=", # empty skips the AI Platform operations.
f"--min-images-per-class={MIN_IMAGES_PER_CLASS}",
f"--max-images-per-class={MAX_IMAGES_PER_CLASS}",
"--runner=DataflowRunner",
f"--job_name=wildlife-train-{SUFFIX}",
f"--project={PROJECT}",
f"--temp_location=gs://{bucket_name}/temp",
"--requirements_file=requirements.txt",
f"--region={REGION}",
],
check=True,
)
def test_predict(model_endpoint_id: str) -> None:
predictions = predict.run(
project=PROJECT,
region=REGION,
model_endpoint_id=model_endpoint_id,
image_file="animals/0036/0072.jpg", # tapirus indicus
)
assert len(predictions) > 0, f"predictions: {repr(predictions)}"
|
GoogleCloudPlatform/python-docs-samples
|
people-and-planet-ai/image-classification/e2e_test.py
|
Python
|
apache-2.0
| 6,821
|
from __future__ import absolute_import
from .administration import AGSAdministration
from .parameters import ClusterProtocol, Extension
__version__ = "3.5.5"
|
DShokes/ArcREST
|
src/arcrest/manageags/__init__.py
|
Python
|
apache-2.0
| 158
|
import asyncio
import unittest.mock
import pytest
START = object()
END = object()
RETVAL = object()
@pytest.fixture
def mock():
return unittest.mock.Mock(return_value=RETVAL)
@pytest.fixture
async def async_fixture(mock):
return await asyncio.sleep(0.1, result=mock(START))
@pytest.mark.asyncio
async def test_async_fixture(async_fixture, mock):
assert mock.call_count == 1
assert mock.call_args_list[-1] == unittest.mock.call(START)
assert async_fixture is RETVAL
|
pytest-dev/pytest-asyncio
|
tests/async_fixtures/test_async_fixtures.py
|
Python
|
apache-2.0
| 493
|
class Search(object):
def __init__(self, keyword=""):
self.keyword = keyword
@classmethod
def Positive(cls):
return cls(keyword="my film")
@classmethod
def Negative(cls):
return cls(keyword="sascmcsl")
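# Illustrative usage, not part of the original module: the two factory
# classmethods just pre-fill the keyword that the project's tests presumably
# feed into the application's search form.
if __name__ == "__main__":
    print(Search.Positive().keyword)          # "my film"
    print(Search.Negative().keyword)          # "sascmcsl"
    print(Search(keyword="custom").keyword)   # "custom"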
|
iri6e4ka/test-project2
|
php4dvd/model/search.py
|
Python
|
apache-2.0
| 248
|
# Task 10
# Write a "Character Generator" program for a game. The user is given 30 points to distribute
# among four attributes: Strength, Health, Wisdom and Agility. The user must be able not only to
# take points from the common "pool" but also to return points from attributes they decided to
# re-assign.
# Zyabko Anton
# 21.05.2016
print("""
You are in the "Character Generator" program. You have 4 attributes:
Strength
Agility
Health
Wisdom
You also have 30 points, which you can distribute among these attributes however you like.
Let's begin!
""")
strength = 0
health = 0
agility = 0
intellect = 0
points = 30
skill = 0
print("If you want to improve Strength, type 'Strength'; if Health, type 'Health'; if Agility, type 'Agility'; if Wisdom, type 'Wisdom'.")
while True:
    if strength < 0 or health < 0 or agility < 0 or intellect < 0 or points > 30:
        print("error")
        break
    elif points == 0:
        print("All points have been spent. Final results:\nStrength:", strength, "\nHealth:", health, "\nWisdom:", intellect, "\nAgility:", agility, "")
        break
    print("Your points:\nStrength:", strength, "\nHealth:", health, "\nWisdom:", intellect, "\nAgility:", agility, "\nUnallocated points:", points)
    user_input = input("")
    if user_input == "Strength":
        skill = int(input("How many points do you want to add (or remove)? "))
        if skill <= points:
            strength += skill
            points -= skill
        else:
            print("too many points")
    elif user_input == "Health":
        skill = int(input("How many points do you want to add (or remove)? "))
        if skill <= points:
            health += skill
            points -= skill
        else:
            print("too many points")
    elif user_input == "Agility":
        skill = int(input("How many points do you want to add (or remove)? "))
        if skill <= points:
            agility += skill
            points -= skill
        else:
            print("too many points")
    elif user_input == "Wisdom":
        skill = int(input("How many points do you want to add (or remove)? "))
        if skill <= points:
            intellect += skill
            points -= skill
        else:
            print("too many points")
input("Press Enter to exit")
|
Mariaanisimova/pythonintask
|
BIZa/2014/Zyabko_A_A/Task_10_45.py
|
Python
|
apache-2.0
| 3,142
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
from quantumclient.quantum.v2_0 import CreateCommand
from quantumclient.quantum.v2_0 import DeleteCommand
from quantumclient.quantum.v2_0 import ListCommand
from quantumclient.quantum.v2_0 import UpdateCommand
from quantumclient.quantum.v2_0 import ShowCommand
def _format_subnets(network):
try:
return '\n'.join(network['subnets'])
except Exception:
return ''
class ListNetwork(ListCommand):
"""List networks that belong to a given tenant."""
resource = 'network'
log = logging.getLogger(__name__ + '.ListNetwork')
_formatters = {'subnets': _format_subnets, }
list_columns = ['id', 'name', 'subnets']
class ShowNetwork(ShowCommand):
"""Show information of a given network."""
resource = 'network'
log = logging.getLogger(__name__ + '.ShowNetwork')
class CreateNetwork(CreateCommand):
"""Create a network for a given tenant."""
resource = 'network'
log = logging.getLogger(__name__ + '.CreateNetwork')
def add_known_arguments(self, parser):
parser.add_argument(
'--admin-state-down',
default=True, action='store_false',
help='Set Admin State Up to false')
parser.add_argument(
'--admin_state_down',
action='store_false',
help=argparse.SUPPRESS)
parser.add_argument(
'--shared',
action='store_true',
default=argparse.SUPPRESS,
help='Set the network as shared')
parser.add_argument(
'name', metavar='name',
help='Name of network to create')
def args2body(self, parsed_args):
body = {'network': {
'name': parsed_args.name,
'admin_state_up': parsed_args.admin_state_down}, }
if parsed_args.tenant_id:
body['network'].update({'tenant_id': parsed_args.tenant_id})
if hasattr(parsed_args, 'shared'):
body['network'].update({'shared': parsed_args.shared})
return body
class DeleteNetwork(DeleteCommand):
"""Delete a given network."""
log = logging.getLogger(__name__ + '.DeleteNetwork')
resource = 'network'
class UpdateNetwork(UpdateCommand):
"""Update network's information."""
log = logging.getLogger(__name__ + '.UpdateNetwork')
resource = 'network'
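# Illustrative note, not part of the original module: for a call such as
# "quantum net-create mynet --shared" with a tenant_id of 'tenant-1' (both
# values hypothetical), CreateNetwork.args2body would assemble roughly:
#
#     {'network': {'name': 'mynet',
#                  'admin_state_up': True,
#                  'tenant_id': 'tenant-1',
#                  'shared': True}}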
|
akushwah/python-quantumclient-n1kv
|
quantumclient/quantum/v2_0/network.py
|
Python
|
apache-2.0
| 3,028
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Pradeep Jairamani , github.com/pradeepjairamani
def wp_plugins():
return ["1-flash-gallery", "1-jquery-photo-gallery-slideshow-flash", "Calendar", "Premium_Gallery_Manager",
"a-gallery", "a-to-z-category-listing", "accept-signups", "adminimize", "adrotate", "advanced-text-widget",
"advertizer", "age-verification", "ajax-category-dropdown", "ajax-store-locator-wordpress_0", "ajaxgallery",
"akismet", "alert-before-your-post", "all-in-one-wp-security-and-firewall", "all-video-gallery",
"allwebmenus-wordpress-menu-plugin", "annonces", "announcement-and-vertical-scroll-news", "asset-manager",
"audio", "auto-attachments", "backwpup", "bbpress", "bezahlcode-generator", "booking",
"cac-featured-content", "catalog", "category-grid-view-gallery", "category-list-portfolio-page",
"cevhershare", "cforms", "cforms2", "cimy-counter", "clickdesk-live-support-chat", "cm-download-manager",
"cms-pack", "comicpress-manager", "comment-rating", "community-events", "complete-gallery-manager",
"contact-form-wordpress", "contus-hd-flv-player", "contus-video-gallery", "copyright-licensing-tools",
"count-per-day", "couponer", "cpl", "crawlrate-tracker", "custom-content-type-manager",
"daily-maui-photo-widget", "db-backup", "disclosure-policy-plugin", "dm-albums", "dmsguestbook",
"downloads-manager", "dp-thumbnail", "drag-drop-file-uploader", "dzs-videogallery",
"easy-contact-form-lite", "easy-contact-forms-exporter", "editormonkey", "evarisk", "event-registration",
"eventify", "extend-wordpress", "facebook-opengraph-meta-plugin", "fbpromotions", "fcchat",
"feature-slideshow", "featurific-for-wordpress", "feedlist", "fgallery", "file-groups", "filedownload",
"firestats", "flash-album-gallery", "flexible-custom-post-type", "font-uploader", "formcraft",
"forum-server", "foxypress", "front-end-upload", "front-file-manager", "fs-real-estate-plugin",
"gallery-images", "gallery-plugin", "gd-star-rating", "global-content-blocks", "google-document-embedder",
"google-mp3-audio-player", "grapefile", "hd-webplayer", "html5avmanager", "igit-posts-slider-widget",
"image-gallery-with-slideshow", "inboundio-marketing", "inline-gallery", "ip-logger", "is-human", "islidex",
"iwant-one-ihave-one", "jetpack", "jquery-mega-menu", "jrss-widget", "js-appointment", "kino-gallery",
"kish-guest-posting", "knr-author-list-widget", "lanoba-social-plugin", "lazy-seo", "lazyest-gallery",
"leaguemanager", "like-dislike-counter-for-posts-pages-and-comments", "link-library",
"lisl-last-image-slider", "livesig", "login-lockdown", "mac-dock-gallery", "mailz",
"media-library-categories", "meenews", "mingle-forum", "mm-forms-community", "myflash", "mystat",
"nextgen-gallery", "nextgen-smooth-gallery", "odihost-newsletter-plugin", "old-post-spinner",
"omni-secure-files", "oqey-gallery", "oqey-headers", "page-flip-image-gallery", "paid-downloads",
"photoracer", "photosmash-galleries", "php_speedy_wp", "pica-photo-gallery", "pictpress", "placester",
"plugin-dir", "plugin-newsletter", "post-highlights", "premium_gallery_manager", "pretty-link", "profiles",
"proplayer", "pure-html", "radykal-fancy-gallery", "rating-widget", "rbxgallery", "really-easy-slider",
"recipe", "reciply", "reflex-gallery", "rekt-slideshow", "related-sites", "relocate-upload", "rent-a-car",
"resume-submissions-job-postings", "ripe-hd-player", "rss-feed-reader", "scormcloud", "search-autocomplete",
"sendit", "seo-automatic-seo-tools", "sermon-browser", "sexy-contact-form", "sfbrowser", "sh-slideshow",
"si-contact-form", "simple-download-button-shortcode", "skysa-official", "sniplets", "social-discussions",
"social-slider-2", "sodahead-polls", "sp-client-document-manager", "spicy-blogroll",
"spider-event-calendar", "st_newsletter", "statpress", "stats", "store-locator-le", "subscribe-to-comments",
"tagninja", "the-welcomizer", "thecartpress", "thinkun-remind", "tinymce-thumbnail-gallery", "topquark",
"tune-library", "ucan-post", "ungallery", "uploader", "uploads", "upm-polls", "user-avatar", "user-meta",
"verve-meta-boxes", "videowhisper-live-streaming-integration", "videowhisper-video-presentation",
"vk-gallery", "vodpod-video-gallery", "wassup", "webinar_plugin", "website-faq",
"wordpress-donation-plugin-with-goals-and-paypal-ipn-by-nonprofitcmsorg", "wordpress-processing-embed",
"wordtube", "wp-adserve", "wp-audio-gallery-playlist", "wp-automatic", "wp-autoyoutube", "wp-bannerize",
"wp-cal", "wp-content", "wp-cumulus", "wp-custom-pages", "wp-db-backup", "wp-ds-faq", "wp-e-commerce",
"wp-easycart", "wp-featured-post-with-thumbnail", "wp-filebase", "wp-filemanager", "wp-footnotes",
"wp-forum", "wp-glossary", "wp-gpx-maps", "wp-lytebox", "wp-marketplace", "wp-menu-creator", "wp-people",
"wp-property", "wp-publication-archive", "wp-realty", "wp-safe-search", "wp-shopping-cart", "wp-spamfree",
"wp-starsratebox", "wp-stats-dashboard", "wp-support-plus-responsive-ticket-system",
"wp-survey-and-quiz-tool", "wp-symposium", "wp-syntax", "wp-table", "wp-twitter-feed", "wp-whois", "wpSS",
"wpeasystats", "wpforum", "wpmarketplace", "wpstorecart", "wptouch", "x7host-videox7-ugc-plugin",
"yolink-search", "yt-audio-streaming-audio-from-youtube", "zingiri-web-shop", "zotpress",
"zotpressinboundio-marketing"]
|
viraintel/OWASP-Nettacker
|
lib/scan/wp_plugin/wp_plugins_small.py
|
Python
|
apache-2.0
| 5,928
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys
# Include custom libs
sys.path.append( '../../include/python' )
import serverutils.config as config
import serverutils.mongohelper as mongohelper
import pymongo
props = {'targetDbName': 'characterinsights_copy'}
for prop in sys.argv[1:]:
k,v = prop.split("=",1)
props[k]=v
# Connect DBs
mongoClientSource, mongoDbSource = mongohelper.getMongoClient()
mongoClientTarget, mongoDbTarget = mongohelper.getMongoClient(orMongoMode='local',orHost='localhost',orDbName=props['targetDbName'])
collections = ['inouttest','movies','personalitydimensionnormalizations','rawMovies','rawQuotes','results']
for collection in collections:
print(collection)
for doc in mongoDbSource[collection].find():
mongoDbTarget[collection].insert_one(doc)
print('.',end='',flush=True)
print("")
print("Done.")
|
Thylossus/tud-movie-character-insights
|
Server/Tools/urlUpdater/copyDatabase.py
|
Python
|
apache-2.0
| 853
|
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def not_anonymous_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
    Decorator for views that checks that the user is logged in and that their username does not contain 'anonymous',
redirecting to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated() and 'anonymous' not in u.username,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
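# Illustrative usage with hypothetical views (profile_sketch and
# settings_sketch are not part of the original module): the decorator behaves
# like Django's login_required, but also redirects any authenticated user
# whose username contains 'anonymous'.
from django.http import HttpResponse


@not_anonymous_required
def profile_sketch(request):
    return HttpResponse("hello %s" % request.user.username)


@not_anonymous_required(login_url='/accounts/login/')
def settings_sketch(request):
    return HttpResponse("settings")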
|
raccoongang/socraticqs2
|
mysite/mysite/decorators.py
|
Python
|
apache-2.0
| 670
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
# FIXME: This could be replaced with hooking into the `AppConfig.ready()`
# signal in Django 1.7:
#
# https://docs.djangoproject.com/en/1.7/ref/applications/#django.apps.AppConfig.ready
#
# For now though we have to load in the monkey patches here because we know
# this file has been loaded after `desktop.settings` has been loaded.
import spark.monkey_patches
# Views
urlpatterns = patterns('spark.views',
url(r'^$', 'editor', name='index'),
url(r'^editor$', 'editor', name='editor'),
url(r'^notebooks$', 'notebooks', name='notebooks'),
url(r'^new$', 'new', name='new'),
url(r'^download$', 'download', name='download'),
)
# APIs
urlpatterns += patterns('spark.api',
url(r'^api/create_session$', 'create_session', name='create_session'),
url(r'^api/execute$', 'execute', name='execute'),
url(r'^api/check_status$', 'check_status', name='check_status'),
url(r'^api/fetch_result_data$', 'fetch_result_data', name='fetch_result_data'),
url(r'^api/fetch_result_metadata$', 'fetch_result_metadata', name='fetch_result_metadata'),
url(r'^api/cancel_statement', 'cancel_statement', name='cancel_statement'),
url(r'^api/close_statement', 'close_statement', name='close_statement'),
url(r'^api/get_logs', 'get_logs', name='get_logs'),
url(r'^api/notebook/save$', 'save_notebook', name='save_notebook'),
url(r'^api/notebook/open$', 'open_notebook', name='open_notebook'),
url(r'^api/notebook/close$', 'close_notebook', name='close_notebook'),
)
|
erickt/hue
|
apps/spark/src/spark/urls.py
|
Python
|
apache-2.0
| 2,311
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests TFL canned estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl import logging
from absl.testing import parameterized
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import tensorflow as tf
from tensorflow import feature_column as fc
from tensorflow_lattice.python import configs
from tensorflow_lattice.python import estimators
from tensorflow_lattice.python import model_info
from tensorflow_estimator.python.estimator.head import regression_head
class CannedEstimatorsTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(CannedEstimatorsTest, self).setUp()
self.eps = 0.001
# UCI Statlog (Heart) dataset.
heart_csv_file = tf.keras.utils.get_file(
'heart.csv',
'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
heart_df = pd.read_csv(heart_csv_file)
heart_target = heart_df.pop('target')
heart_train_size = int(len(heart_df) * 0.8)
self.heart_train_x = heart_df[:heart_train_size]
self.heart_train_y = heart_target[:heart_train_size]
self.heart_test_x = heart_df[heart_train_size:]
self.heart_test_y = heart_target[heart_train_size:]
# Feature columns.
# - age
# - sex
# - cp chest pain type (4 values)
# - trestbps resting blood pressure
# - chol serum cholestoral in mg/dl
# - fbs fasting blood sugar > 120 mg/dl
# - restecg resting electrocardiographic results (values 0,1,2)
# - thalach maximum heart rate achieved
# - exang exercise induced angina
# - oldpeak ST depression induced by exercise relative to rest
# - slope the slope of the peak exercise ST segment
# - ca number of major vessels (0-3) colored by flourosopy
# - thal 3 = normal; 6 = fixed defect; 7 = reversable defect
self.heart_feature_columns = [
fc.numeric_column('age', default_value=-1),
fc.categorical_column_with_vocabulary_list('sex', [0, 1]),
fc.numeric_column('cp'),
fc.numeric_column('trestbps', default_value=-1),
fc.numeric_column('chol'),
fc.categorical_column_with_vocabulary_list('fbs', [0, 1]),
fc.categorical_column_with_vocabulary_list('restecg', [0, 1, 2]),
fc.numeric_column('thalach'),
fc.categorical_column_with_vocabulary_list('exang', [0, 1]),
fc.numeric_column('oldpeak'),
fc.categorical_column_with_vocabulary_list('slope', [0, 1, 2]),
fc.numeric_column('ca'),
fc.categorical_column_with_vocabulary_list(
'thal', ['normal', 'fixed', 'reversible']),
]
# Feature configs. Each model can pick and choose which features to use.
self.heart_feature_configs = [
configs.FeatureConfig(
name='age',
lattice_size=3,
pwl_calibration_num_keypoints=5,
monotonicity=1,
pwl_calibration_clip_max=100,
),
configs.FeatureConfig(
name='cp',
pwl_calibration_num_keypoints=4,
pwl_calibration_input_keypoints='uniform',
monotonicity='increasing',
),
configs.FeatureConfig(
name='chol',
pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
monotonicity=1,
pwl_calibration_clip_min=130,
pwl_calibration_clamp_min=True,
pwl_calibration_clamp_max=True,
regularizer_configs=[
configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
],
),
configs.FeatureConfig(
name='fbs',
monotonicity=[(0, 1)],
),
configs.FeatureConfig(
name='trestbps',
pwl_calibration_num_keypoints=5,
monotonicity='decreasing',
),
configs.FeatureConfig(
name='thalach',
pwl_calibration_num_keypoints=5,
monotonicity=-1,
),
configs.FeatureConfig(
name='restecg',
monotonicity=[(0, 1), (0, 2)],
),
configs.FeatureConfig(
name='exang',
monotonicity=[(0, 1)],
),
configs.FeatureConfig(
name='oldpeak',
pwl_calibration_num_keypoints=5,
monotonicity=1,
),
configs.FeatureConfig(
name='slope',
monotonicity=[(0, 1), (1, 2)],
),
configs.FeatureConfig(
name='ca',
pwl_calibration_num_keypoints=4,
monotonicity='increasing',
),
configs.FeatureConfig(
name='thal',
monotonicity=[('normal', 'fixed'), ('normal', 'reversible')],
),
]
# UCI Boston dataset.
boston_dataset = load_boston()
boston_df = pd.DataFrame(
boston_dataset.data, columns=boston_dataset.feature_names)
boston_df['CHAS'] = boston_df['CHAS'].astype(np.int32)
boston_target = pd.Series(boston_dataset.target)
boston_train_size = int(len(boston_df) * 0.8)
self.boston_train_x = boston_df[:boston_train_size]
self.boston_train_y = boston_target[:boston_train_size]
self.boston_test_x = boston_df[boston_train_size:]
self.boston_test_y = boston_target[boston_train_size:]
# Feature columns.
# - CRIM per capita crime rate by town
# - ZN proportion of residential land zoned for lots over 25,000 sq.ft
# - INDUS proportion of non-retail business acres per town
# - CHAS Charles River dummy variable (= 1 if tract bounds river)
# - NOX nitric oxides concentration (parts per 10 million)
# - RM average number of rooms per dwelling
# - AGE proportion of owner-occupied units built prior to 1940
# - DIS weighted distances to five Boston employment centres
# - RAD index of accessibility to radial highways
# - TAX full-value property-tax rate per $10,000
# - PTRATIO pupil-teacher ratio by town
# - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
# - LSTAT % lower status of the population
# - Target Median value of owner-occupied homes in $1000's
self.boston_feature_columns = [
fc.numeric_column('CRIM'),
fc.numeric_column('ZN'),
fc.numeric_column('INDUS'),
fc.categorical_column_with_vocabulary_list('CHAS', [0, 1]),
fc.numeric_column('NOX'),
fc.numeric_column('RM'),
fc.numeric_column('AGE'),
fc.numeric_column('DIS'),
fc.numeric_column('RAD'),
fc.numeric_column('TAX'),
fc.numeric_column('PTRATIO'),
fc.numeric_column('B'),
fc.numeric_column('LSTAT'),
]
# Feature configs. Each model can pick and choose which features to use.
self.boston_feature_configs = [
configs.FeatureConfig(
name='CRIM',
lattice_size=3,
monotonicity=-1,
pwl_calibration_convexity=1,
),
configs.FeatureConfig(
name='ZN',
pwl_calibration_input_keypoints=[0.0, 25.0, 50.0, 75.0, 100.0],
monotonicity=1,
reflects_trust_in=[
configs.TrustConfig(feature_name='RM', trust_type='trapezoid'),
],
),
configs.FeatureConfig(
name='INDUS',
pwl_calibration_input_keypoints='uniform',
pwl_calibration_always_monotonic=False,
reflects_trust_in=[
configs.TrustConfig(
feature_name='RM',
trust_type='edgeworth',
direction='negative'),
],
regularizer_configs=[
configs.RegularizerConfig(name='calib_wrinkle', l2=1e-4),
],
),
configs.FeatureConfig(name='CHAS',),
configs.FeatureConfig(name='NOX',),
configs.FeatureConfig(
name='RM',
monotonicity='increasing',
pwl_calibration_convexity='concave',
),
configs.FeatureConfig(
name='AGE',
monotonicity=-1,
),
configs.FeatureConfig(
name='DIS',
lattice_size=3,
unimodality=1,
),
configs.FeatureConfig(name='RAD',),
configs.FeatureConfig(name='TAX',),
configs.FeatureConfig(
name='PTRATIO',
monotonicity='decreasing',
),
configs.FeatureConfig(name='B',),
configs.FeatureConfig(
name='LSTAT',
monotonicity=-1,
dominates=[
configs.DominanceConfig(
feature_name='AGE', dominance_type='monotonic'),
],
),
]
def _ResetAllBackends(self):
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
def _GetInputFn(self, x, y, num_epochs=1, batch_size=100, tfds=False):
if tfds:
def _input_fn():
return tf.data.Dataset.from_tensor_slices(
(x.to_dict('list'), y.values)).batch(batch_size).repeat(num_epochs)
return _input_fn
else:
return tf.compat.v1.estimator.inputs.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False,
num_epochs=num_epochs,
num_threads=1)
def _GetHeartTrainInputFn(self, **kwargs):
return self._GetInputFn(self.heart_train_x, self.heart_train_y, **kwargs)
def _GetHeartTestInputFn(self, **kwargs):
return self._GetInputFn(
self.heart_test_x, self.heart_test_y, num_epochs=1, **kwargs)
def _GetBostonTrainInputFn(self, **kwargs):
return self._GetInputFn(self.boston_train_x, self.boston_train_y, **kwargs)
def _GetBostonTestInputFn(self, **kwargs):
return self._GetInputFn(
self.boston_test_x, self.boston_test_y, num_epochs=1, **kwargs)
@parameterized.parameters(
([
'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
'exang', 'oldpeak', 'slope', 'ca', 'thal'
], [['sex', 'oldpeak'], ['fbs', 'thalach'], ['thalach', 'thal'],
['cp', 'trestbps'], ['age', 'ca', 'chol']
], None, None, False, True, 0.8),
([
'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
'exang', 'oldpeak', 'slope', 'ca', 'thal'
], 'random', 6, 5, True, False, 0.85),
([
'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
'exang', 'oldpeak', 'slope', 'ca', 'thal'
], 'crystals', 6, 5, True, False, 0.85),
([
'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
'exang', 'oldpeak', 'slope', 'ca', 'thal'
], 'rtl_layer', 6, 5, True, False, 0.85),
)
def testCalibratedLatticeEnsembleClassifier(self, feature_names, lattices,
num_lattices, lattice_rank,
separate_calibrators,
output_calibration, auc):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.heart_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.heart_feature_configs
if feature_config.name in feature_names
]
if lattices == 'rtl_layer':
# RTL Layer only supports monotonicity and bound constraints.
feature_configs = copy.deepcopy(feature_configs)
for feature_config in feature_configs:
feature_config.lattice_size = 2
feature_config.unimodality = 'none'
feature_config.reflects_trust_in = None
feature_config.dominates = None
feature_config.regularizer_configs = None
model_config = configs.CalibratedLatticeEnsembleConfig(
regularizer_configs=[
configs.RegularizerConfig(name='torsion', l2=1e-4),
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
feature_configs=feature_configs,
lattices=lattices,
num_lattices=num_lattices,
lattice_rank=lattice_rank,
separate_calibrators=separate_calibrators,
output_calibration=output_calibration,
)
estimator = estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=50),
optimizer=tf.keras.optimizers.Adam(0.01),
prefitting_optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetHeartTestInputFn())
logging.info('Calibrated lattice ensemble classifier results:')
logging.info(results)
self.assertGreater(results['auc'], auc)
@parameterized.parameters(
(['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'], False, 0.75),
(['age', 'cp', 'chol', 'slope', 'ca', 'thal'], False, 0.8),
(['trestbps', 'thalach', 'exang', 'oldpeak', 'thal'], True, 0.8),
)
def testCalibratedLatticeClassifier(self, feature_names, output_calibration,
auc):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.heart_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.heart_feature_configs
if feature_config.name in feature_names
]
model_config = configs.CalibratedLatticeConfig(
regularizer_configs=[
configs.RegularizerConfig(name='torsion', l2=1e-4),
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
output_calibration=output_calibration,
feature_configs=feature_configs)
estimator = estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetHeartTestInputFn())
logging.info('Calibrated lattice classifier results:')
logging.info(results)
self.assertGreater(results['auc'], auc)
@parameterized.parameters(
(['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'
], False, False, None, None, 'mean', 0.7),
([
'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',
'exang', 'oldpeak', 'slope', 'ca', 'thal'
], True, True, None, None, 'mean', 0.8),
(['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'
], False, False, 'thalach', None, 'mean', 0.7),
(['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'
], False, False, 'thalach', 'thalach', 'mean', 0.7),
(['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'
], False, False, 'thalach', 'thalach', 'sum', 0.7),
)
def testCalibratedLinearClassifier(self, feature_names, output_calibration,
use_bias, weight_column,
feature_analysis_weight_column,
feature_analysis_weight_reduction, auc):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.heart_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.heart_feature_configs
if feature_config.name in feature_names
]
model_config = configs.CalibratedLinearConfig(
use_bias=use_bias,
regularizer_configs=[
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
output_calibration=output_calibration,
feature_configs=feature_configs)
estimator = estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
weight_column=weight_column,
feature_analysis_weight_column=feature_analysis_weight_column,
feature_analysis_weight_reduction=feature_analysis_weight_reduction,
optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetHeartTestInputFn())
logging.info('Calibrated linear classifier results:')
logging.info(results)
self.assertGreater(results['auc'], auc)
@parameterized.parameters(
([
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT'
], [['CRIM', 'ZN', 'RAD', 'DIS'], ['PTRATIO', 'LSTAT', 'ZN', 'RM'],
['AGE', 'NOX', 'B'], ['INDUS', 'NOX', 'PTRATIO'], ['TAX', 'CHAS'],
['CRIM', 'INDUS', 'AGE', 'RM', 'CHAS']
], None, None, False, True, 60.0),
([
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT'
], 'random', 6, 5, True, False, 50.0),
([
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT'
], 'crystals', 6, 5, True, False, 50.0),
([
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT'
], 'rtl_layer', 6, 5, True, False, 50.0),
)
def testCalibratedLatticeEnsembleRegressor(self, feature_names, lattices,
num_lattices, lattice_rank,
separate_calibrators,
output_calibration, average_loss):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.boston_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.boston_feature_configs
if feature_config.name in feature_names
]
if lattices == 'rtl_layer':
# RTL Layer only supports monotonicity and bound constraints.
feature_configs = copy.deepcopy(feature_configs)
for feature_config in feature_configs:
feature_config.lattice_size = 2
feature_config.unimodality = 'none'
feature_config.reflects_trust_in = None
feature_config.dominates = None
feature_config.regularizer_configs = None
model_config = configs.CalibratedLatticeEnsembleConfig(
regularizer_configs=[
configs.RegularizerConfig(name='torsion', l2=1e-5),
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-5),
],
feature_configs=feature_configs,
lattices=lattices,
num_lattices=num_lattices,
lattice_rank=lattice_rank,
separate_calibrators=separate_calibrators,
output_calibration=output_calibration,
)
estimator = estimators.CannedRegressor(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1),
prefitting_input_fn=self._GetBostonTrainInputFn(num_epochs=50),
optimizer=tf.keras.optimizers.Adam(0.05),
prefitting_optimizer=tf.keras.optimizers.Adam(0.05))
estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetBostonTestInputFn())
logging.info('Calibrated lattice ensemble regressor results:')
logging.info(results)
self.assertLess(results['average_loss'], average_loss)
@parameterized.parameters(
(['CRIM', 'ZN', 'RM', 'DIS', 'PTRATIO', 'LSTAT'], False, 40.0),
(['CRIM', 'INDUS', 'CHAS', 'NOX', 'AGE', 'RAD', 'TAX', 'B'], True, 40.0),
(['CRIM', 'INDUS', 'LSTAT', 'NOX', 'AGE', 'RAD', 'TAX', 'B'], True, 40.0),
)
def testCalibratedLatticeRegressor(self, feature_names, output_calibration,
average_loss):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.boston_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.boston_feature_configs
if feature_config.name in feature_names
]
model_config = configs.CalibratedLinearConfig(
regularizer_configs=[
configs.RegularizerConfig(name='torsion', l2=1e-4),
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
output_calibration=output_calibration,
feature_configs=feature_configs)
estimator = estimators.CannedRegressor(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1),
optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetBostonTestInputFn())
logging.info('Calibrated lattice regressor results:')
logging.info(results)
self.assertLess(results['average_loss'], average_loss)
@parameterized.parameters(
(['CRIM', 'ZN', 'RM', 'DIS', 'PTRATIO', 'LSTAT'], False, False, 40.0),
([
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT'
], True, True, 40.0),
)
def testCalibratedLinearRegressor(self, feature_names, output_calibration,
use_bias, average_loss):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.boston_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.boston_feature_configs
if feature_config.name in feature_names
]
model_config = configs.CalibratedLinearConfig(
use_bias=use_bias,
regularizer_configs=[
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
output_calibration=output_calibration,
feature_configs=feature_configs)
estimator = estimators.CannedRegressor(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1),
optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetBostonTestInputFn())
logging.info('Calibrated linear regressor results:')
logging.info(results)
self.assertLess(results['average_loss'], average_loss)
@parameterized.parameters(
(['CRIM', 'ZN', 'RM', 'DIS', 'PTRATIO', 'LSTAT'], False, False, 40.0),
([
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT'
], True, True, 40.0),
)
def testCalibratedLinearEstimator(self, feature_names, output_calibration,
use_bias, average_loss):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.boston_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.boston_feature_configs
if feature_config.name in feature_names
]
model_config = configs.CalibratedLinearConfig(
use_bias=use_bias,
regularizer_configs=[
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
output_calibration=output_calibration,
feature_configs=feature_configs)
estimator = estimators.CannedEstimator(
head=regression_head.RegressionHead(),
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1),
optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200))
results = estimator.evaluate(input_fn=self._GetBostonTestInputFn())
logging.info('Calibrated linear regressor results:')
logging.info(results)
self.assertLess(results['average_loss'], average_loss)
@parameterized.parameters(
('random', 5, 6, 'all_vertices', False, True),
('random', 4, 5, 'kronecker_factored', True, False),
('rtl_layer', 5, 6, 'kronecker_factored', False, True),
('rtl_layer', 4, 5, 'all_vertices', True, False),
)
def testCalibratedLatticeEnsembleModelInfo(self, lattices, num_lattices,
lattice_rank, parameterization,
separate_calibrators,
output_calibration):
self._ResetAllBackends()
feature_configs = copy.deepcopy(self.heart_feature_configs)
if lattices == 'rtl_layer' or parameterization == 'kronecker_factored':
# RTL Layer only supports monotonicity and bound constraints.
for feature_config in feature_configs:
feature_config.lattice_size = 2
feature_config.unimodality = 'none'
feature_config.reflects_trust_in = None
feature_config.dominates = None
feature_config.regularizer_configs = None
model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=feature_configs,
lattices=lattices,
num_lattices=num_lattices,
lattice_rank=lattice_rank,
parameterization=parameterization,
separate_calibrators=separate_calibrators,
output_calibration=output_calibration,
)
estimator = estimators.CannedClassifier(
feature_columns=self.heart_feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=5),
optimizer=tf.keras.optimizers.Adam(0.01),
prefitting_optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=20))
# Serving input fn is used to create saved models.
serving_input_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec=fc.make_parse_example_spec(self.heart_feature_columns))
)
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
logging.info('Model exported to %s', saved_model_path)
model = estimators.get_model_graph(saved_model_path)
expected_num_nodes = (
len(self.heart_feature_columns) + # Input features
num_lattices + # One lattice per submodel
1 + # Averaging submodels
int(output_calibration)) # Output calibration
if separate_calibrators:
expected_num_nodes += num_lattices * lattice_rank
else:
expected_num_nodes += len(self.heart_feature_columns)
self.assertLen(model.nodes, expected_num_nodes)
@parameterized.parameters(
(['ZN', 'INDUS', 'RM'], 'random', 3, 1, [['ZN', 'RM'], ['RM'], ['INDUS']
]),
(['ZN', 'INDUS', 'RM'], 'crystals', 3, 1, [['RM'], ['INDUS'],
['ZN', 'RM']]),
(['RM', 'LSTAT', 'AGE'], 'crystals', 3, 1, [['LSTAT'], ['LSTAT', 'AGE'],
['RM']]),
)
def testCalibratedLatticeEnsembleFix2dConstraintViolations(
self, feature_names, lattices, num_lattices, lattice_rank,
expected_lattices):
self._ResetAllBackends()
feature_columns = [
feature_column for feature_column in self.boston_feature_columns
if feature_column.name in feature_names
]
feature_configs = [
feature_config for feature_config in self.boston_feature_configs
if feature_config.name in feature_names
]
model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=feature_configs,
lattices=lattices,
num_lattices=num_lattices,
lattice_rank=lattice_rank,
)
estimator = estimators.CannedRegressor(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1),
prefitting_input_fn=self._GetBostonTrainInputFn(num_epochs=50),
optimizer=tf.keras.optimizers.Adam(0.05),
prefitting_optimizer=tf.keras.optimizers.Adam(0.05))
estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200))
# Serving input fn is used to create saved models.
serving_input_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec=fc.make_parse_example_spec(feature_columns)))
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
logging.info('Model exported to %s', saved_model_path)
model = estimators.get_model_graph(saved_model_path)
lattices = []
for node in model.nodes:
if isinstance(node, model_info.LatticeNode):
lattices.append(
[input_node.input_node.name for input_node in node.input_nodes])
self.assertLen(lattices, len(expected_lattices))
for lattice, expected_lattice in zip(lattices, expected_lattices):
self.assertCountEqual(lattice, expected_lattice)
@parameterized.parameters((True,), (False,))
def testDatasetAPI(self, tfds):
self._ResetAllBackends()
feature_columns = self.heart_feature_columns
feature_configs = self.heart_feature_configs
model_config = configs.CalibratedLinearConfig(
feature_configs=feature_configs)
estimator = estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetHeartTrainInputFn(
num_epochs=1, tfds=tfds),
optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(
input_fn=self._GetHeartTrainInputFn(num_epochs=200, tfds=tfds))
results = estimator.evaluate(input_fn=self._GetHeartTestInputFn(tfds=tfds))
logging.info('Calibrated linear classifier results:')
logging.info(results)
self.assertGreater(results['auc'], 0.7)
@parameterized.parameters(
('linear', None, True),
('lattice', 'all_vertices', False),
('lattice', 'kronecker_factored', False),
)
def testCalibratedModelInfo(self, model_type, parameterization,
output_calibration):
self._ResetAllBackends()
if model_type == 'linear':
model_config = configs.CalibratedLinearConfig(
feature_configs=self.heart_feature_configs,
output_calibration=output_calibration,
)
else:
feature_configs = copy.deepcopy(self.heart_feature_configs)
if parameterization == 'kronecker_factored':
# RTL Layer only supports monotonicity and bound constraints.
for feature_config in feature_configs:
feature_config.lattice_size = 2
feature_config.unimodality = 'none'
feature_config.reflects_trust_in = None
feature_config.dominates = None
feature_config.regularizer_configs = None
model_config = configs.CalibratedLatticeConfig(
feature_configs=feature_configs,
parameterization=parameterization,
output_calibration=output_calibration,
)
estimator = estimators.CannedClassifier(
feature_columns=self.heart_feature_columns,
model_config=model_config,
feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=5),
optimizer=tf.keras.optimizers.Adam(0.01),
prefitting_optimizer=tf.keras.optimizers.Adam(0.01))
estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=20))
# Serving input fn is used to create saved models.
serving_input_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec=fc.make_parse_example_spec(self.heart_feature_columns))
)
saved_model_path = estimator.export_saved_model(estimator.model_dir,
serving_input_fn)
logging.info('Model exported to %s', saved_model_path)
model = estimators.get_model_graph(saved_model_path)
expected_num_nodes = (
2 * len(self.heart_feature_columns) + # Input features and calibration
1 + # Linear or lattice layer
int(output_calibration)) # Output calibration
self.assertLen(model.nodes, expected_num_nodes)
if __name__ == '__main__':
tf.test.main()
| tensorflow/lattice | tensorflow_lattice/python/estimators_test.py | Python | apache-2.0 | 33,843 |
# -*- coding: utf-8 -*-
#
# openstack.common documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 4 16:08:06 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openstack.common'
copyright = u'2011, OpenStack, LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openstackcommondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openstackcommon.tex', u'openstack.common Documentation',
u'OpenStack, LLC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openstackcommon', u'openstack.common Documentation',
[u'OpenStack, LLC'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| blamarvt/openstack-common | doc/source/conf.py | Python | apache-2.0 | 7,283 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUClusterResolver."""
import os
import six
from six.moves.urllib.error import URLError
from tensorflow.python import framework
from tensorflow.python.client import session
from tensorflow.python.distribute.cluster_resolver.tpu import tpu_cluster_resolver as resolver
from tensorflow.python.eager.context import LogicalDevice
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
mock = test.mock
try:
from cloud_tpu_client import client # pylint: disable=g-import-not-at-top
except ImportError:
logging.debug(
      'Falling back to TensorFlow client; we recommend you install the Cloud '
'TPU client directly with pip install cloud-tpu-client.')
from tensorflow.python.tpu.client import client # pylint: disable=g-import-not-at-top
class MockRequestClass(object):
def __init__(self, name, tpu_map):
self._name = name
self._tpu_map = tpu_map
def execute(self):
if self._name in self._tpu_map:
return self._tpu_map[self._name]
else:
raise KeyError('Resource %s was not found' % self._name)
class MockNodeClass(object):
def __init__(self, tpu_map):
self._tpu_map = tpu_map
def get(self, name):
return MockRequestClass(name, self._tpu_map)
def mock_request_compute_metadata(*args, **kwargs):
del kwargs # Unused.
if args[0] == 'project/project-id':
return 'test-project'
elif args[0] == 'instance/zone':
return 'projects/test-project/locations/us-central1-c'
elif args[0] == 'instance/network-interfaces/0/ip':
return '10.128.1.2'
return ''
def mock_is_running_in_gce():
return True
def mock_is_not_running_in_gce():
return False
def mock_running_in_gce_urlopen(cls, *args, **kwargs):
del cls, args, kwargs # Unused.
mock_response = mock.MagicMock()
mock_response.info.return_value = {'Metadata-Flavor': 'Google'}
return mock_response
def mock_not_running_in_gce_urlopen(cls, *args, **kwargs):
del cls, args, kwargs # Unused.
raise URLError(reason='Host does not exist.')
@test_util.run_all_in_graph_and_eager_modes
class TPUClusterResolverTest(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
"""Verifies that the ClusterSpec generates the correct proto.
    We are testing this in four different ways to ensure that the ClusterSpec
returned by the TPUClusterResolver behaves identically to a normal
ClusterSpec when passed into the generic ClusterSpec libraries.
Args:
cluster_spec: ClusterSpec returned by the TPUClusterResolver
expected_proto: Expected protobuf
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def mock_service_client(self, tpu_map=None):
if tpu_map is None:
tpu_map = {}
mock_locations = mock.MagicMock()
mock_locations.nodes.return_value = MockNodeClass(tpu_map)
mock_project = mock.MagicMock()
mock_project.locations.return_value = mock_locations
mock_client = mock.MagicMock()
mock_client.projects.return_value = mock_project
return mock_client
@mock.patch.object(resolver, 'is_running_in_gce', mock_is_running_in_gce)
def testCheckRunningInGceWithNoTpuName(self):
with self.assertRaisesRegex(ValueError,
'Please provide a TPU Name to connect to.*'):
resolver.TPUClusterResolver(tpu='')
@mock.patch.object(six.moves.urllib.request, 'urlopen',
mock_running_in_gce_urlopen)
def testIsRunningInGce(self):
self.assertTrue(resolver.is_running_in_gce())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRetrieveProjectAndZoneFromMetadata(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project=None,
zone=None,
tpu=['test-tpu-1'],
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map),
coordinator_name='coordinator')
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'coordinator'
tasks { key: 0 value: '10.128.1.2:%s' }
}
job {
name: 'worker'
tasks { key: 0 value: '10.1.2.3:8470' }
}
""" % cluster_resolver._coordinator_port
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRetrieveProjectAndZoneFromMetadataNoCoordinator(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project=None,
zone=None,
tpu=['test-tpu-1'],
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testNotReadyCloudTpu(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'CREATING'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project=None,
zone=None,
tpu='test-tpu-1',
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
with self.assertRaises(RuntimeError):
cluster_resolver.cluster_spec()
def testSimpleSuccessfulRetrieval(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu=['test-tpu-1'],
coordinator_name='coordinator',
coordinator_address='10.128.1.5:10203',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'coordinator' tasks { key: 0 value: '10.128.1.5:10203' } }
job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
def testFailedMetadata(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='nonexistent-tpu',
coordinator_name='coordinator',
coordinator_address='10.128.1.5:10203',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
with self.assertRaises(ValueError) as context:
cluster_resolver.cluster_spec()
self.assertIn('Could not lookup TPU metadata', str(context.exception))
def testNewNetworkEndpointFormat(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health': 'HEALTHY',
'networkEndpoints': [{
'ipAddress': '10.2.3.4',
'port': 8470,
}]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
coordinator_name='coordinator',
coordinator_address='10.128.1.5:10203',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'coordinator' tasks { key: 0 value: '10.128.1.5:10203' } }
job { name: 'worker' tasks { key: 0 value: '10.2.3.4:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual('grpc://10.2.3.4:8470', cluster_resolver.master())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testPodResolution(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
tpu='test-tpu-1',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map),
coordinator_name='coordinator')
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'coordinator',
tasks { key: 0 value: '10.128.1.2:%s'}
}
job {
name: 'worker'
tasks { key: 0 value: '10.2.3.4:8470' }
tasks { key: 1 value: '10.2.3.5:8470' }
tasks { key: 2 value: '10.2.3.6:8470' }
tasks { key: 3 value: '10.2.3.7:8470' }
}
""" % cluster_resolver._coordinator_port
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.4:8470')
def testPodResolutionNoCoordinator(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.2.3.4:8470' }
tasks { key: 1 value: '10.2.3.5:8470' }
tasks { key: 2 value: '10.2.3.6:8470' }
tasks { key: 3 value: '10.2.3.7:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.4:8470')
def testGetMasterNoEntries(self):
tpu_map = {}
with self.assertRaises(ValueError):
resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu=[],
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
# TODO(saeta): Convert to parameterized test when included in OSS TF.
def verifyShouldResolve(self, tpu, should_resolve):
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu=tpu,
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map={}))
self.assertEqual(should_resolve,
cluster_resolver._cloud_tpu_client.api_available(),
"TPU: '%s'" % tpu)
def testShouldResolveGrpc(self):
self.verifyShouldResolve('grpc://10.1.2.3:8470', False)
def testShouldResolveName(self):
self.verifyShouldResolve('mytpu', True)
def testShouldResolveList(self):
self.verifyShouldResolve(['myothertpu'], True)
def testShouldResolveGrpcPrefix(self):
self.verifyShouldResolve('grpctpu', True)
def testNoCallComputeMetadata(self):
cluster_resolver = resolver.TPUClusterResolver(tpu='grpc://10.1.2.3:8470')
self.assertEqual('grpc://10.1.2.3:8470', cluster_resolver.master())
self.assertEqual(
server_lib.ClusterSpec({
'worker': ['10.1.2.3:8470']
}).as_dict(),
cluster_resolver.cluster_spec().as_dict())
def testGkeEnvironmentForDonut(self):
os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'] = 'grpc://10.120.27.5:8470'
self.assertIn('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', os.environ)
cluster_resolver = resolver.TPUClusterResolver()
self.assertEqual(
compat.as_bytes('grpc://10.120.27.5:8470'),
compat.as_bytes(cluster_resolver.master()))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.120.27.5:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
del os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
def testGkeEnvironmentForPod(self):
os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'] = ('grpc://10.120.27.5:8470,'
'grpc://10.120.27.6:8470,'
'grpc://10.120.27.7:8470,'
'grpc://10.120.27.8:8470')
self.assertIn('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', os.environ)
cluster_resolver = resolver.TPUClusterResolver()
self.assertEqual(
compat.as_bytes('grpc://10.120.27.5:8470'),
compat.as_bytes(cluster_resolver.master()))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.120.27.5:8470' }
tasks { key: 1 value: '10.120.27.6:8470' }
tasks { key: 2 value: '10.120.27.7:8470' }
tasks { key: 3 value: '10.120.27.8:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
del os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
def testRpcDetectionForGrpcString(self):
cluster_resolver = resolver.TPUClusterResolver(
tpu='grpc://10.1.2.3:8470')
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
def testOverrideTaskTypeAndIndexAndGetMaster(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.4:8470')
cluster_resolver.task_type = 'worker'
cluster_resolver.task_id = 3
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.7:8470')
def testGetDeviceDictAndCoresWithTPUs(self):
devices = [
'/job:tpu_worker/task:0/device:TPU:0',
'/job:tpu_worker/task:1/device:TPU:1',
'/job:tpu_worker/task:2/device:TPU:0',
'/job:tpu_worker/task:3/device:TPU:1',
'/job:tpu_worker/task:0/device:TPU:4',
'/job:tpu_worker/task:1/device:TPU:5',
'/job:tpu_worker/task:2/device:TPU:4',
'/job:tpu_worker/task:3/device:TPU:5',
]
device_list = [
session._DeviceAttributes(name, 'TPU', 1024, 0) for name in devices
]
device_details = resolver.TPUClusterResolver._get_device_dict_and_cores(
device_list)
self.assertEqual(device_details.total_cores, 8)
self.assertEqual(device_details.device_map,
{'0': ['0', '4'],
'1': ['1', '5'],
'2': ['0', '4'],
'3': ['1', '5']})
def testGetDeviceDictAndCoresWithCPUsAndGPUs(self):
devices = [
'/job:tpu_worker/task:0/device:CPU:0',
'/job:tpu_worker/task:1/device:CPU:0',
'/job:tpu_worker/task:2/device:CPU:0',
'/job:tpu_worker/task:3/device:CPU:0',
'/job:tpu_worker/task:0/device:GPU:1',
'/job:tpu_worker/task:1/device:GPU:1',
'/job:tpu_worker/task:2/device:GPU:1',
'/job:tpu_worker/task:3/device:GPU:1',
]
device_list = [
session._DeviceAttributes(name, 'XLA', 1024, 0) for name in devices
]
device_dict, num_cores =\
resolver.TPUClusterResolver._get_device_dict_and_cores(device_list)
self.assertEqual(num_cores, 0)
self.assertEqual(device_dict, {})
def testVerifySameCoreCount(self):
self.assertEqual(
resolver.TPUClusterResolver
._verify_and_return_same_core_count({0: [0, 1, 2, 3, 4, 5, 6, 7]}), 8)
self.assertEqual(
resolver.TPUClusterResolver
._verify_and_return_same_core_count({
0: [0, 1],
1: [2, 3]
}), 2)
with self.assertRaises(RuntimeError):
resolver.TPUClusterResolver._verify_and_return_same_core_count(
{
0: [0],
1: [1, 2]
})
@mock.patch.object(framework.config, 'list_logical_devices')
@mock.patch.object(session.BaseSession, 'list_devices')
@mock.patch.object(resolver, 'is_running_in_gce', mock_is_not_running_in_gce)
def testNumAcceleratorsSuccess(self, mock_list_devices,
mock_eager_list_devices):
devices = [
LogicalDevice('/job:tpu_worker/task:0/device:TPU:0', 'TPU'),
LogicalDevice('/job:tpu_worker/task:1/device:TPU:1', 'TPU'),
LogicalDevice('/job:tpu_worker/task:2/device:TPU:0', 'TPU'),
LogicalDevice('/job:tpu_worker/task:3/device:TPU:1', 'TPU'),
LogicalDevice('/job:tpu_worker/task:0/device:TPU:4', 'TPU'),
LogicalDevice('/job:tpu_worker/task:1/device:TPU:5', 'TPU'),
LogicalDevice('/job:tpu_worker/task:2/device:TPU:4', 'TPU'),
LogicalDevice('/job:tpu_worker/task:3/device:TPU:5', 'TPU'),
]
device_list = [
session._DeviceAttributes(d.name, d.device_type, 1024, 0)
for d in devices
]
mock_eager_list_devices.return_value = devices
mock_list_devices.return_value = device_list
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(cluster_resolver.num_accelerators(), {'TPU': 2})
@mock.patch.object(framework.config, 'list_logical_devices')
@mock.patch.object(session.BaseSession, 'list_devices')
@mock.patch.object(resolver, 'is_running_in_gce', mock_is_not_running_in_gce)
def testNumAcceleratorsRetryFailure(self, mock_list_devices,
mock_eager_list_devices):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
service=self.mock_service_client(tpu_map=tpu_map))
mock_list_devices.side_effect = errors.DeadlineExceededError(
None, None, 'timeout')
mock_eager_list_devices.side_effect = errors.DeadlineExceededError(
None, None, 'timeout')
with self.assertRaises(RuntimeError):
cluster_resolver.num_accelerators()
def testLocalTpuResolver(self):
cr = resolver.TPUClusterResolver(tpu='local')
self.assertEqual(cr.get_master(), '')
if __name__ == '__main__':
test.main()
| Intel-Corporation/tensorflow | tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py | Python | apache-2.0 | 23,979 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2011-2015, Yu-chen Kao (cybeliak)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Guess missing pronunciations in a phone file by a lexicon. """
__author__ = "cybeliak"
import sys
import re
import gzip
import argparse
RE_ENG = re.compile(r"^[A-Za-z]+$")
RE_SEP = re.compile(r"[-_]")
def query_words(word, lexicon, unknown):
if word in lexicon:
return lexicon[word].replace(" ", "+")
    # If the word contains ASCII characters
    if any(ord(c) < 128 for c in word):
        # If there's an underscore or dash, guess the pronunciation by splitting the word
if RE_SEP.search(word):
subwords = RE_SEP.split(word)
allprons = []
for subword in subwords:
allprons.append(query_words(subword, lexicon, unknown))
if allprons[-1] == unknown:
return unknown
return "+".join(allprons)
# If it's English and it's too long, or it's not English, we are out of luck
if len(word) > 3 or not RE_ENG.match(word):
return unknown
# If not too long, treat it as an abbreviation
word = word.lower()
        rslt = "+".join(lexicon.get(word[i] + ".", lexicon.get(word[i], unknown))
                        for i in range(len(word))).replace(" ", "+")
if rslt.find(unknown) == -1:
return rslt
# If it's not English, then guess from shorter words
for i in range(-1, -len(word), -1):
if word[:i] in lexicon:
part1 = lexicon[word[:i]].replace(" ", "+")
part2 = query_words(word[i:], lexicon, unknown)
if not part1 == unknown and not part2 == unknown:
return "%s+%s" % (part1, part2)
# Finally... still no luck
return unknown
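def _example_query_words():
    """A minimal sketch of query_words on a toy lexicon.
    The lexicon entries below are hypothetical illustration values; the real
    lexicon format is parsed in main() below.
    """
    toy_lexicon = {"open": "ow p ah n", "stack": "s t ae k"}
    # Separator-joined words are split and their pronunciations joined with '+'.
    assert query_words("open-stack", toy_lexicon, "?") == "ow+p+ah+n+s+t+ae+k"
    # Long out-of-lexicon words fall back to the `unknown` marker.
    assert query_words("zzzzzz", toy_lexicon, "?") == "?"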
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--replace-pattern", default=r"\?")
parser.add_argument("--unknown", default="?")
parser.add_argument("phone", type=argparse.FileType('r', encoding='utf-8'))
parser.add_argument("text", type=argparse.FileType('r', encoding='utf-8'))
parser.add_argument("lex", type=argparse.FileType('r', encoding='utf-8'))
args = parser.parse_args()
pattern = re.compile("^%s$" % (args.replace_pattern), re.U)
if args.phone.name.endswith(".gz"):
args.phone.close()
args.phone = gzip.open(args.phone.name, "rt", encoding='utf-8')
if args.text.name.endswith(".gz"):
args.text.close()
args.text = gzip.open(args.text.name, "rt", encoding='utf-8')
if args.lex.name.endswith(".gz"):
args.lex.close()
args.lex = gzip.open(args.lex.name, "rt", encoding='utf-8')
# loading lexicon
lexicon = {}
probability = {}
for line in args.lex:
# Format: word prob lang phone1 [phone2 ...]
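        # e.g. a (hypothetical) line "hello 0.9 en hh ah l ow" yields
        # lexicon["hello"] = "hh ah l ow" with probability["hello"] = 0.9.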
parsed = line.strip().split(None, 3)
if len(parsed) < 4:
continue
if parsed[0] in lexicon and probability[parsed[0]] >= float(parsed[1]):
continue
lexicon[parsed[0]] = parsed[-1]
probability[parsed[0]] = float(parsed[1])
for linephone, linetext in zip(args.phone, args.text):
parsedphone = linephone.strip().split()
parsedtext = linetext.strip().split()
if len(parsedphone) != len(parsedtext):
raise RuntimeError("phone and text mismatch: \n%s\n%s" \
% (parsedphone, parsedtext))
# Start from 1 since position 0 is the utterance key
for i in range(1, len(parsedphone)):
if pattern.match(parsedphone[i]) is None:
continue
parsedphone[i] = query_words(parsedtext[i], lexicon, args.unknown)
print(" ".join(parsedphone))
if __name__ == '__main__':
main()
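# Example invocation (file names are hypothetical; gzipped inputs also work):
#   python3 guess_pron_from_lex.py phone.txt text.txt lexicon.txt > phone_filled.txt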
| cybeliak/mispronounceableness | mpn_utils/guess_pron_from_lex.py | Python | apache-2.0 | 4,324 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data Util for metric learning project."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.io as sio
import tensorflow as tf
from tensorflow.python.platform import gfile # tf.gfile does NOT work with big mat files.
import retina.response_model.python.metric_learning.analyse_metric as analyse
class DataUtilsMetric(object):
"""Get repeats, triplets for metric learning."""
def __init__(self, path):
"""Load responses from 'repeats'."""
print('Loading from: '+ str(path))
data_file = gfile.Open(path, 'r')
data = sio.loadmat(data_file)
self._responses = data['repeats']
self.n_cells = self._responses.shape[2]
self.n_trials = self._responses.shape[0]
self.n_times = self._responses.shape[1]
mean_response = np.mean(np.reshape(self._responses,
[-1, self._responses.shape[2]]), 0)
self.mean_response = np.squeeze(mean_response)
# Check if the data also contains stimulus.
if 'stimulus' in data.keys():
self._stimulus = data['stimulus']
else:
self._stimulus = None
if 'centers' in data.keys():
self._centers = data['centers']
else:
self._centers = None
if 'ttf' in data.keys():
self.ttf = np.squeeze(data['ttf'])
else:
self.ttf = None
if 'cell_type' in data.keys():
self.cell_type = np.squeeze(data['cell_type'])
else:
self.cell_type = None
def get_mean_response(self):
"""Return mean response for each cell."""
return self.mean_response
def get_cell_type(self):
"""Return cell type of cells. (+1 for type 1, -1 for type 2)."""
return self.cell_type
def get_centers(self):
"""Returns center location of cells."""
return self._centers
def get_repeats(self):
"""Returns response matrix: (Trials x Time x Cells)."""
return self._responses
def get_stimulus(self):
"""Returns stimulus matrix: None or (Time x Dimension X x Dimension Y)."""
return self._stimulus
def get_all_responses(self, time_window):
"""Return all the response (flatten repeats) in same format as triplets.
Args :
time_window (int) : The number of continuous time bins for
each response.
Returns :
all_responses : All the responses (batch x cells x time_window).
stim_time : Time index of corresponding stimulus.
"""
n_trials = self._responses.shape[0]
n_times = self._responses.shape[1]
n_cells = self._responses.shape[2]
all_responses = np.zeros((n_trials * (n_times - time_window + 1),
n_cells, time_window))
icnt = 0
stim_time = []
for itrial in range(n_trials):
for itime in range(n_times - time_window + 1):
all_responses[icnt, :, :] = self._responses[itrial,
itime: itime+time_window,
:].T
stim_time += [itime]
icnt += 1
stim_time = np.array(stim_time)
return all_responses, stim_time
def get_triplets(self, batch_size=1000, time_window=50):
"""Get a batch of triplets (anchor, positive, negative).
'anchor' and 'positive' are responses to same stimulus in different trials
and 'negative' is response to another stimulus.
Args:
batch_size (int) : batch size of triplets
time_window (int) : number of continuous time bins to
include in each example
Returns:
anchor : Numpy array of anchor (batch x cells x time_window).
pos : positive examples - near anchor
neg : negative examples - far from anchor
time_log : Times for anchor/positive and negative examples (batch x 2)
trial_log : Trials for anchor/negative and positive examples (batch x 2)
      0: A dummy output so that the number of outputs is 6
"""
# setup basic parameters
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
anchor = np.zeros((batch_size, n_cells, time_window))
pos = np.zeros((batch_size, n_cells, time_window))
neg = np.zeros((batch_size, n_cells, time_window))
time_log = np.zeros((batch_size, 2))
trial_log = np.zeros((batch_size, 2))
for iexample in range(batch_size):
# sample random trials
random_trials = np.random.choice(n_trials, 2, replace=False)
      # sample random times which are at least time_window apart.
# time for anchor and positive example.
itime1 = np.random.randint(response_length - time_window)
# time for negative example.
itime2 = np.random.randint(response_length - time_window)
# time for anchor and negative not too close.
while np.abs(itime1 - itime2) < time_window:
itime2 = np.random.randint(response_length - time_window)
anchor[iexample, :, :] = self._responses[random_trials[0],
itime1: itime1 + time_window,
:].T
pos[iexample, :, :] = self._responses[random_trials[1],
itime1: itime1 + time_window, :].T
neg[iexample, :, :] = self._responses[random_trials[0],
itime2: itime2 + time_window, :].T
time_log[iexample, :] = np.array([itime1, itime2])
trial_log[iexample, :] = random_trials
return anchor, pos, neg, time_log, trial_log, 0
def get_tripletsB(self, batch_size=1000, time_window=50):
"""Get batch of triplets with negatives of same stimulus, no correlations.
'anchor' and 'positive' are responses to a stimulus in different trials
    and 'negative' is a response to the same stimulus, but with trials sampled
    independently for each cell.
Args:
batch_size (int) : batch size of triplets
time_window (int) : number of continuous time bins to
include in each example
Returns:
anchor : Numpy array of anchor (batch x cells x time_window).
pos : positive examples - near anchor
neg : negative examples - far from anchor
time_log : Times for anchor/positive and negative examples (batch x 2)
      trial_log : Trials for the anchor and positive examples (batch x 2)
trial_log_negatives : Trials for negatives (batch x n_cells)
"""
# setup basic parameters
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
anchor = np.zeros((batch_size, n_cells, time_window))
pos = np.zeros((batch_size, n_cells, time_window))
neg = np.zeros((batch_size, n_cells, time_window))
time_log = np.zeros((batch_size))
trial_log = np.zeros((batch_size, 2))
trial_log_negatives = np.zeros((batch_size, n_cells))
for iexample in range(batch_size):
# sample random trials
random_trials = np.random.choice(n_trials, 2, replace=False)
remaining_trials = np.setdiff1d(np.arange(n_trials), random_trials)
      if remaining_trials.size == 0:
tf.logging.info('No trials left for negatives.')
continue
negative_trials = np.random.choice(remaining_trials, n_cells)
      # sample a random start time shared by the anchor, positive and negative.
      itime1 = np.random.randint(response_length - time_window)
anchor[iexample, :, :] = self._responses[random_trials[0],
itime1: itime1 + time_window,
:].T
pos[iexample, :, :] = self._responses[random_trials[1],
itime1: itime1 + time_window, :].T
for icell in range(n_cells):
neg[iexample, icell, :] = self._responses[negative_trials[icell],
itime1:
itime1 + time_window, icell].T
time_log[iexample] = np.array(itime1)
trial_log[iexample, :] = random_trials
trial_log_negatives[iexample, :] = negative_trials
return anchor, pos, neg, time_log, trial_log, trial_log_negatives
def get_response_all_trials(self, n_stims, time_window, random_seed=234):
""" Get population responses for all repeats of few stimuli.
Args :
n_stims (int) : Number of stimuli.
time_window (int) : Number of successive time bins for each response.
Returns :
r : Collection of responses (# responses x # cells x time_window)
stim_id (int): The stimulus time of each response point (# responses)
"""
from numpy.random import RandomState
prng = RandomState(random_seed)
print('Setting local pseudo-random number generator')
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
tms = prng.randint(0, response_length, n_stims)
n_responses = n_trials * n_stims
r = np.zeros((n_responses, n_cells, time_window))
for itm_cnt, itm in enumerate(tms):
r[itm_cnt*n_trials :
(itm_cnt+1)*n_trials, :, :] = np.transpose(self._responses[:, itm: itm + time_window, :], [0, 2, 1])
stim_id = np.repeat(tms, n_trials, 0)
return r, stim_id
def get_all_response_few_trials(self, n_trials, time_window):
"""Get population responses for few repeats of all stimuli.
Args :
n_trials (int) : Number of trials for which to give resposne.
time_window (int) : Number of successive time bins for each response.
Returns :
r : Collection of responses. (# trials x # cells x time_window)
trials_sample (int): ID of trials selected. (# trials)
"""
from numpy.random import RandomState
prng = RandomState(250)
print('Setting local pseudo-random number generator')
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
    total_trials = self._responses.shape[0]
    r = np.zeros((n_trials, response_length, n_cells, time_window))
    trials_sample = prng.randint(0, total_trials, n_trials)
for itrial_cnt, itrial in enumerate(trials_sample):
for istim in range(response_length):
r[itrial_cnt, istim, :, :] = np.transpose(np.expand_dims(
self._responses[itrial, istim: istim + time_window, :], 0),
[0, 2, 1])
return r, trials_sample
# TODO(bhaishahster) : Add triplets by methods (b,c) with negatives generated by mixing trials
# Additional triplet methods
def get_tripletsC(self, batch_size=1000, time_window=50):
"""Get batch of triplets with negatives of same stimulus, no correlations.
'anchor' and 'positive' are responses to different stimuli
and 'negative' is response to different stimulus, and different repeats
(d(X,X-) < d(X,Xh-))
Args:
batch_size (int) : batch size of triplets
time_window (int) : number of continuous time bins to
include in each example
Returns:
anchor : Numpy array of anchor (batch x cells x time_window).
pos : positive examples - near anchor
neg : negative examples - far from anchor
time_log : Times for anchor/positive and negative examples (batch x 2)
      trial_log : Trials for the anchor and positive examples (batch x 2)
trial_log_negatives : Trials for negatives (batch x n_cells)
"""
# setup basic parameters
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
anchor = np.zeros((batch_size, n_cells, time_window))
pos = np.zeros((batch_size, n_cells, time_window))
neg = np.zeros((batch_size, n_cells, time_window))
time_log = np.zeros((batch_size))
trial_log = np.zeros((batch_size, 2))
trial_log_negatives = np.zeros((batch_size, n_cells))
for iexample in range(batch_size):
# sample random trials
random_trials = np.random.choice(n_trials, 2, replace=False)
remaining_trials = np.setdiff1d(np.arange(n_trials), random_trials)
      if remaining_trials.size == 0:
tf.logging.info('No trials left for negatives.')
continue
negative_trials = np.random.choice(remaining_trials, n_cells)
      # sample random times which are at least time_window apart.
      # time for the anchor example.
      itime1 = np.random.randint(response_length - time_window)
      # time for the positive and negative examples.
      itime2 = np.random.randint(response_length - time_window)
      # keep the two times from being too close.
      while np.abs(itime1 - itime2) < time_window:
        itime2 = np.random.randint(response_length - time_window)
anchor[iexample, :, :] = self._responses[random_trials[0],
itime1: itime1 + time_window,
:].T
pos[iexample, :, :] = self._responses[random_trials[1],
itime2: itime2 + time_window, :].T
for icell in range(n_cells):
neg[iexample, icell, :] = self._responses[negative_trials[icell],
itime2:
itime2 + time_window, icell].T
time_log[iexample] = np.array(itime1)
trial_log[iexample, :] = random_trials
trial_log_negatives[iexample, :] = negative_trials
return anchor, pos, neg, time_log, trial_log, trial_log_negatives
def get_tripletsD(self, batch_size=1000, time_window=50):
"""Get batch of triplets with negatives of same stimulus, no correlations.
'anchor' and 'positive' are responses to same stimuli,
but positive has trials for different cells all mixed up
and 'negative' is response to different stimulus,
and trials for different cells mixed up
(d(X,Xh+)<d(X, Xh-))
Args:
batch_size (int) : batch size of triplets
time_window (int) : number of continuous time bins to
include in each example
Returns:
anchor : Numpy array of anchor (batch x cells x time_window).
pos : positive examples - near anchor
neg : negative examples - far from anchor
time_log : Times for anchor/positive and negative examples (batch x 2)
      trial_log : Trials used for the anchor examples (batch x 2)
trial_log_negatives : Trials for negatives (batch x n_cells)
"""
# setup basic parameters
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
anchor = np.zeros((batch_size, n_cells, time_window))
pos = np.zeros((batch_size, n_cells, time_window))
neg = np.zeros((batch_size, n_cells, time_window))
time_log = np.zeros((batch_size))
trial_log = np.zeros((batch_size, 2))
trial_log_negatives = np.zeros((batch_size, n_cells))
for iexample in range(batch_size):
# sample random trials
random_trials = np.random.choice(n_trials, 1, replace=False)
remaining_trials = np.setdiff1d(np.arange(n_trials), random_trials)
      if remaining_trials.size == 0:
tf.logging.info('No trials left for negatives.')
continue
positive_trials = np.random.choice(remaining_trials, n_cells)
negative_trials = np.random.choice(remaining_trials, n_cells)
      # sample random times which are at least time_window apart.
      # time for the anchor and positive examples.
      itime1 = np.random.randint(response_length - time_window)
      # time for the negative example.
      itime2 = np.random.randint(response_length - time_window)
      # keep the anchor and negative times from being too close.
      while np.abs(itime1 - itime2) < time_window:
        itime2 = np.random.randint(response_length - time_window)
anchor[iexample, :, :] = self._responses[random_trials[0],
itime1: itime1 + time_window,
:].T
for icell in range(n_cells):
pos[iexample, icell, :] = self._responses[positive_trials[icell],
itime1:
itime1 + time_window, icell].T
for icell in range(n_cells):
neg[iexample, icell, :] = self._responses[negative_trials[icell],
itime2:
itime2 + time_window, icell].T
time_log[iexample] = np.array(itime1)
trial_log[iexample, :] = random_trials
trial_log_negatives[iexample, :] = negative_trials
return anchor, pos, neg, time_log, trial_log, trial_log_negatives
def get_triplets_mix(self, batch_size, time_window, score):
"""Return hard triplets which are hard for the score function."""
# Get triplets A
outputs_a = self.get_triplets(batch_size, time_window)
anchor_batch_a, pos_batch_a, neg_batch_a, _, _, _ = outputs_a
outputs_a = list(outputs_a) + [None]
_, _, accuracy_a = analyse.compute_distances(score, anchor_batch_a,
pos_batch_a, neg_batch_a)
error_a = 1 - accuracy_a
# Get triplets B
outputs_b = self.get_tripletsB(batch_size, time_window)
anchor_batch_b, pos_batch_b, neg_batch_b, _, _, _ = outputs_b
_, _, accuracy_b = analyse.compute_distances(score, anchor_batch_b,
pos_batch_b, neg_batch_b)
error_b = 1 - accuracy_b
# Get triplets C
outputs_c = self.get_tripletsC(batch_size, time_window)
anchor_batch_c, pos_batch_c, neg_batch_c, _, _, _ = outputs_c
_, _, accuracy_c = analyse.compute_distances(score, anchor_batch_c,
pos_batch_c, neg_batch_c)
error_c = 1 - accuracy_c
# Get triplets D
outputs_d = self.get_tripletsD(batch_size, time_window)
anchor_batch_d, pos_batch_d, neg_batch_d, _, _, _ = outputs_d
_, _, accuracy_d = analyse.compute_distances(score, anchor_batch_d,
pos_batch_d, neg_batch_d)
error_d = 1 - accuracy_d
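    # Convert per-scheme error rates into sampling probabilities with a
    # softmax, so the triplet schemes that the current score function finds
    # harder are sampled more often.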
errs = np.array([error_a, error_b, error_c, error_d])
probs = np.exp(errs)/np.sum(np.exp(errs))
# print(score.iter , probs)
xx = np.random.random()
    if xx < probs[0]:
      return outputs_a
    elif xx < probs[0] + probs[1]:
      return outputs_b
    elif xx < probs[0] + probs[1] + probs[2]:
      return outputs_c
    else:
      return outputs_d
def get_stimulus_response_samples(self, batch_size, time_window):
"""Get a few samples of stimulus and response
Args :
batch_size (int) : number of examples.
time_window (int) : number of continuous time bins to
include in each example
Returns :
stimulus_examples : Numpy array of stimulus samples
(batch x dimx x dimy).
response_examples : Numpy array of response samples
(batch x cells x time_window).
time_log : Times for anchor/positive and negative examples (batch)
trial_log : Trials for anchor/negative and positive examples (batch)
"""
if self._stimulus is None:
tf.logging.info('Stimulus not found.')
return None
# setup basic parameters
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
dimx = self._stimulus.shape[1]
dimy = self._stimulus.shape[2]
response_examples = np.zeros((batch_size, n_cells, time_window))
stimulus_examples = np.zeros((batch_size, dimx, dimy))
time_log = np.zeros((batch_size, 1))
trial_log = np.zeros((batch_size, 1))
for iexample in range(batch_size):
# sample random trials
random_trial = np.random.choice(n_trials, 1, replace=False)
      # sample a random start time for this stimulus/response example.
itime = np.random.randint(response_length - time_window)
stimulus_examples[iexample, :, :] = self._stimulus[itime, :, :]
response_examples[iexample, :, :] = self._responses[random_trial[0],
itime: itime +
time_window,
:].T
time_log[iexample] = np.array(itime)
trial_log[iexample] = random_trial
return stimulus_examples, response_examples, time_log, trial_log
def get_triplets_batch(self, batch_size=1000, time_window=50):
"""Get pairs of postitives (anchor, positive) and a batch of negatives.
'anchor' and 'positive' are responses to same stimulus in different trials
and 'negative' is response to another stimulus.
Args:
batch_size (int) : batch size of triplets
time_window (int) : number of continuous time bins to
include in each example
Returns:
anchor : Numpy array of anchor (batch x cells x time_window).
pos : positive examples - near anchor
neg : negative examples - far from anchor
time_log : Times for anchor/positive and negative examples (batch x 2)
trial_log : Trials for anchor/negative and positive examples (batch x 2)
      0: A dummy output so that the number of outputs is 6
"""
# setup basic parameters
n_trials = self._responses.shape[0]
response_length = self._responses.shape[1]
n_cells = self._responses.shape[2]
anchor = np.zeros((batch_size, n_cells, time_window))
pos = np.zeros((batch_size, n_cells, time_window))
neg = np.zeros((batch_size, n_cells, time_window))
    # generate positive pairs
pos_times = []
for iexample in range(batch_size):
# sample random trials
random_trials = np.random.choice(n_trials, 2, replace=False)
      # sample a random time for the anchor and positive example; negative
      # times are kept at least time_window away below.
itime = np.random.randint(response_length - time_window)
pos_times += [itime]
anchor[iexample, :, :] = self._responses[random_trials[0],
itime: itime + time_window,
:].T
pos[iexample, :, :] = self._responses[random_trials[1],
itime: itime + time_window, :].T
pos_times = np.array(pos_times)
for iexample in range(batch_size):
# sample random trials
random_trials = np.random.choice(n_trials, 1)
# time for negative example.
itime = np.random.randint(response_length - time_window)
# time for anchor and negative not too close.
while np.min(np.abs(itime - pos_times)) < time_window:
itime = np.random.randint(response_length - time_window)
neg[iexample, :, :] = self._responses[random_trials[0],
itime: itime + time_window, :].T
return anchor, pos, neg, 0, 0, 0
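def _example_triplet_sampling(path='/tmp/repeats.mat'):
  """A minimal usage sketch for DataUtilsMetric; the path above is hypothetical.
  Assumes the .mat file stores a 'repeats' array shaped (trials x time x cells),
  which is what DataUtilsMetric.__init__ expects.
  """
  data = DataUtilsMetric(path)
  # Each returned array is (batch x cells x time_window); anchor and positive
  # share a stimulus window, while the negative comes from a window at least
  # time_window bins away.
  anchor, pos, neg, time_log, trial_log, _ = data.get_triplets(
      batch_size=32, time_window=50)
  return anchor.shape, pos.shape, neg.shape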
| googlearchive/rgc-models | response_model/python/metric_learning/data_util.py | Python | apache-2.0 | 24,111 |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import io
import os
from oslo_serialization import jsonutils
import queue
import sys
import fixtures
import testtools
from magnumclient.common import httpclient as http
from magnumclient import shell
FAKE_ENV = {'OS_USERNAME': 'username',
'OS_PASSWORD': 'password',
'OS_PROJECT_NAME': 'project_name',
'OS_AUTH_URL': 'http://no.where/v2.0'}
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(fixtures.FakeLogger())
class FakeAPI(object):
def __init__(self, responses):
self.responses = responses
self.calls = []
def _request(self, method, url, headers=None, body=None):
call = (method, url, headers or {}, body)
self.calls.append(call)
return self.responses[url][method]
def raw_request(self, *args, **kwargs):
response = self._request(*args, **kwargs)
body_iter = http.ResponseBodyIterator(io.StringIO(response[1]))
return FakeResponse(response[0]), body_iter
def json_request(self, *args, **kwargs):
response = self._request(*args, **kwargs)
return FakeResponse(response[0]), response[1]
class FakeConnection(object):
def __init__(self, response=None, **kwargs):
self._response = queue.Queue()
self._response.put(response)
self._last_request = None
self._exc = kwargs['exc'] if 'exc' in kwargs else None
if 'redirect_resp' in kwargs:
self._response.put(kwargs['redirect_resp'])
def request(self, method, conn_url, **kwargs):
self._last_request = (method, conn_url, kwargs)
if self._exc:
raise self._exc
def setresponse(self, response):
self._response = response
def getresponse(self):
return self._response.get()
class FakeResponse(object):
def __init__(self, headers, body=None, version=None, status=None,
reason=None):
"""Fake object to help testing.
:param headers: dict representing HTTP response headers
:param body: file-like object
"""
self.headers = headers
self.body = body
self.version = version
self.status = status
self.reason = reason
def __getitem__(self, key):
if key == 'location':
return 'fake_url'
else:
return None
def getheaders(self):
return copy.deepcopy(self.headers).items()
def getheader(self, key, default):
return self.headers.get(key, default)
def read(self, amt):
return self.body.read(amt)
class FakeServiceCatalog(object):
def url_for(self, endpoint_type, service_type, attr=None,
filter_value=None):
if attr == 'region' and filter_value:
return 'http://regionhost:6385/v1/f14b41234'
else:
return 'http://localhost:6385/v1/f14b41234'
class FakeKeystone(object):
service_catalog = FakeServiceCatalog()
timestamp = datetime.datetime.utcnow() + datetime.timedelta(days=5)
def __init__(self, auth_token):
self.auth_token = auth_token
self.auth_ref = {
'token': {'expires': FakeKeystone.timestamp.strftime(
'%Y-%m-%dT%H:%M:%S.%f'),
'id': 'd1a541311782870742235'}
}
class TestCase(testtools.TestCase):
TEST_REQUEST_BASE = {
'verify': True,
}
def setUp(self):
super(TestCase, self).setUp()
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
def make_env(self, exclude=None, fake_env=FAKE_ENV):
env = dict((k, v) for k, v in fake_env.items() if k != exclude)
self.useFixture(fixtures.MonkeyPatch('os.environ', env))
def shell(self, argstr, exitcodes=(0,)):
orig = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
_shell = shell.OpenStackMagnumShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertIn(exc_value.code, exitcodes)
finally:
stdout = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
stderr = sys.stderr.getvalue()
sys.stderr.close()
sys.stderr = orig_stderr
return (stdout, stderr)
class FakeSessionResponse(object):
def __init__(self, headers, content=None, status_code=None):
self.headers = headers
self.content = content
self.status_code = status_code
def json(self):
if self.content is not None:
return jsonutils.loads(self.content)
else:
return {}
class FakeSession(object):
def __init__(self, headers, content=None, status_code=None):
self.headers = headers
self.content = content
self.status_code = status_code
def request(self, url, method, **kwargs):
return FakeSessionResponse(self.headers, self.content,
self.status_code)
|
openstack/python-magnumclient
|
magnumclient/tests/utils.py
|
Python
|
apache-2.0
| 6,314
|
"""
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .. import system_tests_common as Common
from arakoon.ArakoonExceptions import *
from Compat import X
import arakoon
import time
import logging
from threading import Thread
from nose.tools import *
@Common.with_custom_setup(Common.setup_2_nodes_forced_master, Common.basic_teardown)
def test_collapse_witness_node():
master = Common.node_names[0]
witness = Common.node_names[1]
n = 298765
Common.iterate_n_times(n, Common.simple_set)
logging.info("did %i sets, now going into collapse scenario" % n)
Common.collapse(witness,1)
logging.info("collapsing done")
Common.stopOne(master)
Common.wipe(master)
Common.startOne(master)
cli = Common.get_client()
assert_false(cli.expectProgressPossible())
up2date = False
counter = 0
while not up2date and counter < 100:
time.sleep(1.0)
counter = counter + 1
up2date = cli.expectProgressPossible()
logging.info("catchup from collapsed node finished")
@Common.with_custom_setup(Common.setup_2_nodes, Common.basic_teardown)
def test_collapse():
zero = Common.node_names[0]
one = Common.node_names[1]
n = 298765
Common.iterate_n_times(n, Common.simple_set)
logging.info("did %i sets, now going into collapse scenario" % n)
Common.collapse(zero,1)
logging.info("collapsing done")
Common.stopOne(one)
Common.wipe(one)
Common.startOne(one)
cli = Common.get_client()
assert_false(cli.expectProgressPossible())
up2date = False
counter = 0
while not up2date and counter < 100:
time.sleep(1.0)
counter = counter + 1
up2date = cli.expectProgressPossible()
logging.info("catchup from collapsed node finished")
|
Incubaid/arakoon
|
pylabs/test/server/left/test_collapse.py
|
Python
|
apache-2.0
| 2,292
|
# Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Function names
GET_ATTR = 'get_attr'
GET_PARAM = 'get_param'
|
openstack/vitrage
|
vitrage/evaluator/template_functions/__init__.py
|
Python
|
apache-2.0
| 636
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import argparse
from nlp.segment.joint_bilstm_crf import train
from nlp.segment.joint_bilstm_crf import test
def main(args):
if args.process == tf.estimator.ModeKeys.TRAIN:
train.train(args)
elif args.process == tf.estimator.ModeKeys.PREDICT:
test.joint_predict(args)
else:
raise Exception("cannot support this process:" + args.process)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', required=True, type=str, default='data/bi_lstm_crf_segment/ckpt')
parser.add_argument('--data_dir', required=True, type=str, default='data/bi_lstm_crf_segment/data')
parser.add_argument('--utils_dir', required=True, type=str, default='data/bi_lstm_crf_segment/utils')
parser.add_argument('--vocab_dir', required=True, type=str, default='data/bi_lstm_crf_segment/vocab')
parser.add_argument('--optimizer', type=str, default='adagrad')
parser.add_argument('--tag_scheme', type=str, default='BIES')
parser.add_argument('--ngram', type=int, default=3)
    # argparse's type=bool treats any non-empty string (including "False") as
    # True, so parse boolean flags explicitly.
    _str2bool = lambda s: s.lower() in ('true', '1', 'yes')
    parser.add_argument('--word_vector', type=_str2bool, default=True)
    parser.add_argument('--pre_embeddings', type=_str2bool, default=True)
    parser.add_argument('--gru', type=_str2bool, default=True)
    parser.add_argument('--clipping', type=_str2bool, default=True)
    parser.add_argument('--ensemble', type=_str2bool, default=False)
    parser.add_argument('--tag_large', type=_str2bool, default=False)
parser.add_argument('--crf', type=int, default=1)
parser.add_argument('--cell_dimension', type=int, default=200)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--embeddings_dimension', type=int, default=64)
parser.add_argument('--num_epochs', type=int, default=30)
parser.add_argument('--large_size', type=int, default=200000)
parser.add_argument('--bucket_size', type=int, default=10)
parser.add_argument('--dropout_rate', type=float, default=0.5)
parser.add_argument('--learning_rate', type=float, default=0.1)
parser.add_argument('--decay_rate', type=float, default=0.05)
parser.add_argument('--predict_file', type=str)
parser.add_argument('--result_file', type=str)
parser.add_argument('--process', type=str, default='train')
args = parser.parse_args()
main(args)
|
koala-ai/tensorflow_nlp
|
nlp/segment/joint_bilstm_crf/run.py
|
Python
|
apache-2.0
| 2,419
|
#unit
#mydict.py
class Dict(dict):
def __init__(self,**kw):
super(Dict,self).__init__(**kw)
def __getattr__(self,key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object han no attribute'%s'" %key)
def __setattr__(self,key,value):
self[key]=value
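#usage sketch (not part of the original file): attribute access and key access
#stay in sync on the same underlying dict.
def _demo_dict():
    d = Dict(a=1, b='test')
    assert d.a == d['a'] == 1   # __getattr__ falls back to the dict item
    d.c = True                  # __setattr__ stores into the dict
    assert d['c'] is True
    return d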
|
zengboming/python
|
mydict.py
|
Python
|
apache-2.0
| 293
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation.
See the [Sparse Ops](https://tensorflow.org/api_guides/python/sparse_ops) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_to_sparse_tensor(sp_input):
"""Convert `sp_input` to `SparseTensor` and return it.
Args:
sp_input: `SparseTensor` or `SparseTensorValue`.
Returns:
`sp_input` converted to `SparseTensor`.
Raises:
ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
def _make_int64_tensor(value, name):
if isinstance(value, compat.integral_types):
return ops.convert_to_tensor(value, name=name, dtype=dtypes.int64)
if not isinstance(value, ops.Tensor):
raise TypeError("{} must be an integer value".format(name))
if value.dtype == dtypes.int64:
return value
return math_ops.cast(value, dtypes.int64)
@tf_export("sparse.expand_dims")
def sparse_expand_dims(sp_input, axis=None, name=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `sp_input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `sp_input`'s shape. The dimension index `axis`
starts at zero; if you specify a negative number for `axis` it is counted
backwards from the end.
Args:
sp_input: A `SparseTensor`.
axis: 0-D (scalar). Specifies the dimension index at which to expand the
shape of `input`. Must be in the range `[-rank(sp_input) - 1,
rank(sp_input)]`.
name: The name of the output `SparseTensor`.
Returns:
A `SparseTensor` with the same data as `sp_input`, but its shape has an
additional dimension of size 1 added.
"""
rank = sp_input.dense_shape.get_shape()[0]
axis = -1 if axis is None else axis
with ops.name_scope(name, default_name="expand_dims", values=[sp_input]):
if isinstance(axis, compat.integral_types):
axis = ops.convert_to_tensor(axis, name="axis", dtype=dtypes.int32)
elif not isinstance(axis, ops.Tensor):
raise TypeError("axis must be an integer value in range [-rank(sp_input)"
" - 1, rank(sp_input)]")
# Convert axis to a positive value if it is negative.
axis = array_ops.where(axis >= 0, axis, axis + rank + 1)
# Create the new column of indices for the sparse tensor by slicing
# the indices and inserting a new column of indices for the new dimension.
column_size = array_ops.shape(sp_input.indices)[0]
new_index = array_ops.zeros([column_size, 1], dtype=dtypes.int64)
indices_before = array_ops.slice(sp_input.indices, [0, 0], [-1, axis])
indices_after = array_ops.slice(sp_input.indices, [0, axis], [-1, -1])
indices = array_ops.concat(
[indices_before, new_index, indices_after], axis=1)
# Create the new dense shape by splicing the tensor [1] in the correct
# dimension of the existing shape.
shape_before = array_ops.slice(sp_input.dense_shape, [0], [axis])
shape_after = array_ops.slice(sp_input.dense_shape, [axis], [-1])
new_shape = ops.convert_to_tensor([1], name="new_shape", dtype=dtypes.int64)
shape = array_ops.concat([shape_before, new_shape, shape_after], axis=0)
# Create the output sparse tensor.
return sparse_tensor.SparseTensor(
indices=indices, values=sp_input.values, dense_shape=shape)
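# Illustrative sketch (not part of the original module): expanding a rank-2
# SparseTensor to rank 3 by inserting a size-1 dimension at axis=1. The
# concrete indices and values are arbitrary.
def _example_sparse_expand_dims():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 1], [2, 3]], values=[1.0, 2.0], dense_shape=[3, 4])
  # Resulting dense_shape is [3, 1, 4]; indices become [[0, 0, 1], [2, 0, 3]].
  return sparse_expand_dims(sp, axis=1)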
@tf_export("sparse.eye")
def sparse_eye(num_rows,
num_columns=None,
dtype=dtypes.float32,
name=None):
"""Creates a two-dimensional sparse tensor with ones along the diagonal.
Args:
num_rows: Non-negative integer or `int32` scalar `tensor` giving the number
of rows in the resulting matrix.
num_columns: Optional non-negative integer or `int32` scalar `tensor` giving
the number of columns in the resulting matrix. Defaults to `num_rows`.
dtype: The type of element in the resulting `Tensor`.
name: A name for this `Op`. Defaults to "eye".
Returns:
A `SparseTensor` of shape [num_rows, num_columns] with ones along the
diagonal.
"""
with ops.name_scope(name, default_name="eye", values=[num_rows, num_columns]):
num_rows = _make_int64_tensor(num_rows, "num_rows")
num_columns = num_rows if num_columns is None else _make_int64_tensor(
num_columns, "num_columns")
# Create the sparse tensor.
diag_size = math_ops.minimum(num_rows, num_columns)
diag_range = math_ops.range(diag_size, dtype=dtypes.int64)
return sparse_tensor.SparseTensor(
indices=array_ops.stack([diag_range, diag_range], axis=1),
values=array_ops.ones(diag_size, dtype=dtype),
dense_shape=[num_rows, num_columns])
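# Illustrative sketch (not part of the original module): a 3x5 sparse
# "identity" whose dense form has ones at [0, 0], [1, 1] and [2, 2].
def _example_sparse_eye():
  return sparse_eye(3, num_columns=5, dtype=dtypes.float32)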
# pylint: disable=protected-access
@tf_export("sparse.concat", "sparse_concat")
@deprecation.deprecated_endpoints("sparse_concat")
@deprecation.deprecated_args(
None, "concat_dim is deprecated, use axis instead", "concat_dim")
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each input is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
  If expand_nonconcat_dim is True, then the output shape along the non-concat
  dimensions will be expanded to the largest among all inputs, and along the
  concat dimension it is the sum of the inputs' sizes.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if 'axis = 1' and the inputs are
sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
dimensions. Defaulted to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:]
if axis == -1 else shape[axis:axis + 1], []
if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
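# Illustrative sketch (not part of the original module): concatenating the
# [2, 3] and [2, 4] inputs from the docstring above into a single [2, 7]
# SparseTensor along axis=1.
def _example_sparse_concat():
  sp_a = sparse_tensor.SparseTensor(
      indices=[[0, 2], [1, 0], [1, 1]], values=["a", "b", "c"],
      dense_shape=[2, 3])
  sp_b = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 2]], values=["d", "e"], dense_shape=[2, 4])
  return sparse_concat(1, [sp_a, sp_b])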
@tf_export("sparse.add", v1=["sparse.add", "sparse_add"])
@deprecation.deprecated_endpoints("sparse_add")
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The shapes of the two operands must match: broadcasting is not supported.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
`SparseReorder` to restore index ordering.
If both arguments are sparse, we perform "clipping" as follows. By default,
if two values sum to zero at some index, the output `SparseTensor` would still
include that particular location in its index, storing a zero in the
corresponding value slot. To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included. In particular,
`thresh == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive `thresh`.
For example, suppose the logical sum of two sparse operands is (densified):
[ 2]
[.1 0]
[ 6 -.2]
Then,
* `thresh == 0` (the default): all 5 index/value pairs will be returned.
* `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
index/value pairs will be returned.
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
Args:
a: The first operand; `SparseTensor` or `Tensor`.
b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
if all(isinstance(inp, sparse_classes) for inp in [a, b]):
a = _convert_to_sparse_tensor(a)
b = _convert_to_sparse_tensor(b)
thresh = ops.convert_to_tensor(
thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape, thresh))
# Attempt to get output_shape statically.
a.get_shape().assert_is_compatible_with(b.get_shape())
static_shape = array_ops.broadcast_static_shape(a.get_shape(),
b.get_shape())
if static_shape.is_fully_defined():
output_shape = static_shape.as_list()
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
a, b = b, a
return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
a.dense_shape, b)
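# Illustrative sketch (not part of the original module): adding two overlapping
# SparseTensors; with the default thresh=0 the result keeps an explicit entry
# even where the sum is zero.
def _example_sparse_add():
  sp_a = sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 1]], values=[1.0, -2.0], dense_shape=[2, 2])
  sp_b = sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 1]], values=[3.0, 2.0], dense_shape=[2, 2])
  return sparse_add(sp_a, sp_b)  # values [4.0, 0.0] at [0, 0] and [1, 1]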
@tf_export("sparse.cross")
def sparse_cross(inputs, name=None):
"""Generates sparse cross from a list of sparse and dense tensors.
For example, if the inputs are
* inputs[0]: SparseTensor with shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
* inputs[1]: SparseTensor with shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
* inputs[2]: Tensor [["f"], ["g"]]
then the output will be:
shape = [2, 2]
[0, 0]: "a_X_d_X_f"
[1, 0]: "b_X_e_X_g"
[1, 1]: "c_X_e_X_g"
Args:
inputs: An iterable of `Tensor` or `SparseTensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` of type `string`.
"""
return _sparse_cross_internal(inputs=inputs, hashed_output=False, name=name)
_sparse_cross = sparse_cross
@tf_export("sparse.cross_hashed")
def sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
"""Generates hashed sparse cross from a list of sparse and dense tensors.
For example, if the inputs are
* inputs[0]: SparseTensor with shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
* inputs[1]: SparseTensor with shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
* inputs[2]: Tensor [["f"], ["g"]]
then the output will be:
shape = [2, 2]
[0, 0]: FingerprintCat64(
Fingerprint64("f"), FingerprintCat64(
Fingerprint64("d"), Fingerprint64("a")))
[1, 0]: FingerprintCat64(
Fingerprint64("g"), FingerprintCat64(
Fingerprint64("e"), Fingerprint64("b")))
[1, 1]: FingerprintCat64(
Fingerprint64("g"), FingerprintCat64(
Fingerprint64("e"), Fingerprint64("c")))
Args:
inputs: An iterable of `Tensor` or `SparseTensor`.
num_buckets: An `int` that is `>= 0`.
output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
hash_key: Integer hash_key that will be used by the `FingerprintCat64`
function. If not given, will use a default key.
name: Optional name for the op.
Returns:
A `SparseTensor` of type `int64`.
"""
return _sparse_cross_internal(
inputs=inputs,
hashed_output=True,
num_buckets=num_buckets,
hash_key=hash_key,
name=name)
_sparse_cross_hashed = sparse_cross_hashed
_DEFAULT_HASH_KEY = 0xDECAFCAFFE
def _sparse_cross_internal(inputs,
hashed_output=False,
num_buckets=0,
hash_key=None,
name=None):
"""See gen_sparse_ops.sparse_cross."""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(
isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor)
for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [
i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
]
dense_inputs = [
i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
out_type = dtypes.int64 if hashed_output else dtypes.string
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
internal_type = dtypes.int64
indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
hashed_output=hashed_output,
num_buckets=num_buckets,
hash_key=hash_key or _DEFAULT_HASH_KEY,
out_type=out_type,
internal_type=internal_type,
name=name)
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
def sparse_dense_cwise_add(sp_t, dense_t):
"""Adds up a SparseTensor and a dense Tensor, using these special rules:
(1) Broadcasts the dense side to have the same shape as the sparse side, if
eligible;
(2) Then, only the dense values pointed to by the indices of the SparseTensor
participate in the cwise addition.
By the rules, the result is a logical SparseTensor with exactly the same
indices and shape, but possibly with different non-zero values. The output of
this Op is the resultant non-zero values.
Args:
sp_t: the SparseTensor operand.
dense_t: the dense Tensor operand; must have the same dtype and a
broadcast-compatible shape as `sp_t`.
Returns:
output: the SparseTensor output.
"""
result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
sp_t.dense_shape, dense_t)
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)
@tf_export("sparse.reorder", v1=["sparse.reorder", "sparse_reorder"])
@deprecation.deprecated_endpoints("sparse_reorder")
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering
along increasing dimension number. The only time ordering can be violated
is during manual manipulation of the indices and values to add entries.
Reordering does not affect the shape of the `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[4, 5]` and
`indices` / `values`:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same shape and non-empty values, but in
canonical ordering.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
reordered_ind, reordered_val = (
gen_sparse_ops.sparse_reorder(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))
if sp_input.get_shape().is_fully_defined():
dense_shape = sp_input.get_shape().as_list()
else:
dense_shape = array_ops.identity(sp_input.dense_shape)
return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)
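# Illustrative sketch (not part of the original module): restoring canonical
# row-major ordering after building a SparseTensor with out-of-order indices.
def _example_sparse_reorder():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 3], [0, 1], [2, 0]], values=["b", "a", "c"],
      dense_shape=[4, 5])
  # Indices come back as [[0, 1], [0, 3], [2, 0]] with values ["a", "b", "c"].
  return sparse_reorder(sp)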
@tf_export("sparse.reshape", v1=["sparse.reshape", "sparse_reshape"])
@deprecation.deprecated_endpoints("sparse_reshape")
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
This operation has the same semantics as `reshape` on the represented dense
tensor. The indices of non-empty values in `sp_input` are recomputed based
on the new dense shape, and a new `SparseTensor` is returned containing the
new indices and new shape. The order of non-empty values in `sp_input` is
unchanged.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of `shape` can be -1. The number of dense elements
implied by `shape` must be the same as the number of dense elements
originally represented by `sp_input`.
For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:
[0, 0, 0]: a
[0, 0, 1]: b
[0, 1, 0]: c
[1, 0, 0]: d
[1, 2, 3]: e
and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
shape `[9, 4]` and `indices` / `values`:
[0, 0]: a
[0, 1]: b
[1, 2]: c
[4, 2]: d
[8, 1]: e
Args:
sp_input: The input `SparseTensor`.
shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same non-empty values but with indices calculated
by the new dense shape.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If argument `shape` requests a `SparseTensor` with a different
number of elements than `sp_input`.
ValueError: If `shape` has more than one inferred (== -1) dimension.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
shape = math_ops.cast(shape, dtype=dtypes.int64)
with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
reshaped_ind, reshaped_shape = gen_sparse_ops.sparse_reshape(
sp_input.indices, sp_input.dense_shape, shape, name=name)
reshaped_shape_const = tensor_util.constant_value(shape)
if (reshaped_shape_const is not None and
sp_input.get_shape().is_fully_defined()):
num_implied = sum((dim == -1) for dim in reshaped_shape_const)
if num_implied > 1:
raise ValueError("At most one dimension can be inferred (-1). Found: %s"
% reshaped_shape_const)
original_reshaped_shape = list(reshaped_shape_const) # Copy.
in_shape_size = np.prod(sp_input.get_shape().as_list())
if num_implied:
implied_idx = original_reshaped_shape.index(-1)
non_implied_idx = (
original_reshaped_shape[:implied_idx] +
original_reshaped_shape[implied_idx + 1:])
reshaped_shape_const[implied_idx] = (
in_shape_size // np.prod(non_implied_idx))
reshaped_size = np.prod(reshaped_shape_const)
if reshaped_size != in_shape_size:
raise ValueError("Cannot reshape a tensor with %d elements to shape %s "
"(%d elements)." %
(in_shape_size, original_reshaped_shape,
reshaped_size))
reshaped_shape = reshaped_shape_const
return sparse_tensor.SparseTensor(reshaped_ind,
array_ops.identity(sp_input.values),
reshaped_shape)
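# Illustrative sketch (not part of the original module): reshaping the
# [2, 3, 6] example from the docstring above to [9, -1]; the -1 dimension is
# inferred statically as 4 because the dense shape is fully known.
def _example_sparse_reshape():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
      values=["a", "b", "c", "d", "e"], dense_shape=[2, 3, 6])
  return sparse_reshape(sp, [9, -1])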
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
def __repr__(self):
# This is needed to make documentation without fully qualified module paths
return "KeywordRequired()"
@tf_export("sparse.split", "sparse_split")
@deprecation.deprecated_endpoints("sparse_split")
@deprecation.deprecated_args(
None, "split_dim is deprecated, use axis instead", "split_dim")
def sparse_split(keyword_required=KeywordRequired(),
sp_input=None,
num_split=None,
axis=None,
name=None,
split_dim=None):
"""Split a `SparseTensor` into `num_split` tensors along `axis`.
  If `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`,
  each of the first `shape[axis] % num_split` slices gets one extra element
  along `axis`. For example, if `axis = 1` and `num_split = 2` and the
input is:
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] =
[ a ]
[b c ]
output_tensor[1] =
[ d e ]
[ ]
Args:
keyword_required: Python 2 standin for * (temporary for argument reorder)
sp_input: The `SparseTensor` to split.
num_split: A Python integer. The number of ways to split.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
name: A name for the operation (optional).
split_dim: Deprecated old name for axis.
Returns:
`num_split` `SparseTensor` objects resulting from splitting `value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If the deprecated `split_dim` and `axis` are both non None.
"""
if not isinstance(keyword_required, KeywordRequired):
raise ValueError("Keyword arguments are required for this function.")
if sp_input is None:
raise ValueError("sp_input is required")
if num_split is None:
raise ValueError("num_split is required")
if axis is None:
raise ValueError("axis is required")
axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
split_dim)
sp_input = _convert_to_sparse_tensor(sp_input)
output_inds, output_vals, output_shapes = (
gen_sparse_ops.sparse_split(
axis,
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
num_split,
name=name))
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
sparse_tensor.SparseTensor(output_inds[i], output_vals[i],
output_shapes[i]))
return sparse_tensors
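# Illustrative sketch (not part of the original module): splitting the [2, 7]
# tensor from the docstring above into two pieces along axis=1. Note the
# keyword-only calling convention enforced by KeywordRequired.
def _example_sparse_split():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
      values=["a", "d", "e", "b", "c"], dense_shape=[2, 7])
  return sparse_split(sp_input=sp, num_split=2, axis=1)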
@tf_export("sparse.slice", v1=["sparse.slice", "sparse_slice"])
@deprecation.deprecated_endpoints("sparse_slice")
def sparse_slice(sp_input, start, size, name=None):
"""Slice a `SparseTensor` based on the `start` and `size.
For example, if the input is
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
sparse.slice([0, 0], [2, 4]) = shape = [2, 4]
[ a ]
[b c ]
sparse.slice([0, 4], [2, 3]) = shape = [2, 3]
[ d e ]
[ ]
Args:
sp_input: The `SparseTensor` to split.
start: 1-D. tensor represents the start of the slice.
size: 1-D. tensor represents the size of the slice.
name: A name for the operation (optional).
Returns:
    A `SparseTensor` object resulting from slicing.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
start = ops.convert_to_tensor(start, dtypes.int64)
size = ops.convert_to_tensor(size, dtypes.int64)
with ops.name_scope(name, "SparseSlice", [sp_input]) as name:
output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
start,
size,
name=name)
return sparse_tensor.SparseTensor(output_indices, output_values,
output_shape)
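# Illustrative sketch (not part of the original module): taking the left
# [2, 4] window of the [2, 7] tensor from the docstring above.
def _example_sparse_slice():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
      values=["a", "d", "e", "b", "c"], dense_shape=[2, 7])
  return sparse_slice(sp, start=[0, 0], size=[2, 4])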
@tf_export("sparse_to_dense")
@deprecation.deprecated(
None,
"Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.")
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
default_value=0,
validate_indices=True,
name=None):
"""Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```python
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values`
is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is True, these properties
are checked during execution.
Args:
sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops.sparse_to_dense(
sparse_indices,
output_shape,
sparse_values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
@tf_export("sparse.reduce_max", "sparse_reduce_max")
@deprecation.deprecated_endpoints("sparse_reduce_max")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_max(sp_input, axis=None, keepdims=None,
reduction_axes=None, keep_dims=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Note: A gradient is not defined for this function, so it can't be used
in training models that need gradient descent.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
  Values not defined in `sp_input` don't participate in the reduce max, as
  opposed to being implicitly assumed to be 0 -- hence the result can be
  negative for sparse `reduction_axes`. However, if a reduced slice contains
  no values at all, it reduces to 0. See the second example below.
For example:
```python
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
tf.sparse.reduce_max(x) ==> 3
tf.sparse.reduce_max(x, 0) ==> [1, 3, 2]
tf.sparse.reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]]
tf.sparse.reduce_max(x, [0, 1]) ==> 3
  # 'y' represents [[-7, ?]
  #                 [ 4, 3]
  #                 [ ?, ?]]
  tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0]
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keepdims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced Tensor.
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return gen_sparse_ops.sparse_reduce_max(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)
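# Illustrative sketch (not part of the original module): the first example
# from the docstring above, reducing over all dimensions and over axis 0.
def _example_sparse_reduce_max():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 2], [1, 1]], values=[1, 2, 3], dense_shape=[2, 3])
  overall = sparse_reduce_max(sp)             # scalar 3
  per_column = sparse_reduce_max(sp, axis=0)  # [1, 3, 2]
  return overall, per_column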
@tf_export("sparse.reduce_max_sparse", "sparse_reduce_max_sparse")
@deprecation.deprecated_endpoints("sparse_reduce_max_sparse")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_max_sparse(sp_input,
axis=None,
keepdims=None,
reduction_axes=None,
keep_dims=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a
SparseTensor.
Note: A gradient is not defined for this function, so it can't be used
in training models that need gradient descent.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keepdims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced SparseTensor.
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_max_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse.reduce_sum", "sparse_reduce_sum")
@deprecation.deprecated_endpoints("sparse_reduce_sum")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
reduction_axes=None, keep_dims=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse.reduce_sum(x) ==> 3
tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]]
tf.sparse.reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keepdims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced Tensor.
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return gen_sparse_ops.sparse_reduce_sum(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)
@tf_export("sparse.reduce_sum_sparse", "sparse_reduce_sum_sparse")
@deprecation.deprecated_endpoints("sparse_reduce_sum_sparse")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_sum_sparse(sp_input,
axis=None,
keepdims=None,
reduction_axes=None,
keep_dims=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Note: A gradient is not defined for this function, so it can't be used
in training models that need gradient descent.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keepdims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced SparseTensor.
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_sum_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse.to_dense", v1=["sparse.to_dense", "sparse_tensor_to_dense"])
@deprecation.deprecated_endpoints("sparse_tensor_to_dense")
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
name=None):
"""Converts a `SparseTensor` into a dense tensor.
This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.
For example, if `sp_input` has shape `[3, 5]` and non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
and `default_value` is `x`, then the output will be a dense `[3, 5]`
string tensor with values:
[[x a x b x]
[x x x x x]
[c x x x x]]
  Indices must not contain repeats. This is only checked if
  `validate_indices` is True.
Args:
sp_input: The input `SparseTensor`.
default_value: Scalar value to set for indices not specified in
`sp_input`. Defaults to zero.
validate_indices: A boolean value. If `True`, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name prefix for the returned tensors (optional).
Returns:
A dense tensor with shape `sp_input.dense_shape` and values specified by
the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
`default_value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return sparse_to_dense(
sp_input.indices,
sp_input.dense_shape,
sp_input.values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
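# Illustrative sketch (not part of the original module): densifying the [3, 5]
# string example from the docstring above with default_value="x".
def _example_sparse_tensor_to_dense():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 3], [2, 0]], values=["a", "b", "c"],
      dense_shape=[3, 5])
  return sparse_tensor_to_dense(sp, default_value="x")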
@tf_export(
"sparse.to_indicator", v1=["sparse.to_indicator", "sparse_to_indicator"])
@deprecation.deprecated_endpoints("sparse_to_indicator")
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
The last dimension of `sp_input.indices` is discarded and replaced with
the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where
output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True
and False elsewhere in `output`.
For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:
[0, 0, 0]: 0
[0, 1, 0]: 10
[1, 0, 3]: 103
[1, 1, 2]: 150
[1, 1, 3]: 149
[1, 1, 4]: 150
[1, 2, 1]: 121
and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
tensor with False everywhere except at positions
(0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 121).
Note that repeats are allowed in the input SparseTensor.
This op is useful for converting `SparseTensor`s into dense formats for
compatibility with ops that expect dense tensors.
The input `SparseTensor` must be in row-major order.
Args:
sp_input: A `SparseTensor` with `values` property of type `int32` or
`int64`.
vocab_size: A scalar int64 Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
name: A name prefix for the returned tensors (optional)
Returns:
A dense bool indicator tensor representing the indices with specified value.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values,
sp_input.dense_shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
# validate_indices may be False because we allow duplicates in new_indices:
# repeated indices are allowed when creating an indicator matrix.
return sparse_tensor_to_dense(
sp_new, default_value=False, validate_indices=False, name=name)
@tf_export("sparse.merge", v1=["sparse.merge", "sparse_merge"])
@deprecation.deprecated_endpoints("sparse_merge")
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
The most common use case for this function occurs when feature ids and
their corresponding values are stored in `Example` protos on disk.
`parse_example` will return a batch of ids and a batch of values, and this
function joins them into a single logical `SparseTensor` for use in
functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.
The `SparseTensor` returned by this function has the following properties:
- `indices` is equivalent to `sp_ids.indices` with the last
dimension discarded and replaced with `sp_ids.values`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn, vocab_size]`.
For example, consider the following feature vectors:
```python
vector1 = [-3, 0, 0, 0, 0, 0]
vector2 = [ 0, 1, 0, 4, 1, 0]
vector3 = [ 5, 0, 0, 9, 0, 0]
```
These might be stored sparsely in the following Example protos by storing
only the feature ids (column number if the vectors are treated as a matrix)
of the non-zero elements and the corresponding values:
```python
examples = [Example(features={
"ids": Feature(int64_list=Int64List(value=[0])),
"values": Feature(float_list=FloatList(value=[-3]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
"values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[0, 3])),
"values": Feature(float_list=FloatList(value=[5, 9]))})]
```
The result of calling parse_example on these examples will produce a
dictionary with entries for "ids" and "values". Passing those two objects
to this function along with vocab_size=6, will produce a `SparseTensor` that
sparsely represents all three instances. Namely, the `indices` property will
contain the coordinates of the non-zero entries in the feature matrix (the
first dimension is the row number in the matrix, i.e., the index within the
batch, and the second dimension is the column number, i.e., the feature id);
`values` will contain the actual values. `shape` will be the shape of the
original matrix, i.e., (3, 6). For our example above, the output will be
equal to:
```python
SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
values=[-3, 1, 4, 1, 5, 9],
dense_shape=[3, 6])
```
This method generalizes to higher-dimensions by simply providing a list for
both the sp_ids as well as the vocab_size.
In this case the resulting `SparseTensor` has the following properties:
- `indices` is equivalent to `sp_ids[0].indices` with the last
dimension discarded and concatenated with
`sp_ids[0].values, sp_ids[1].values, ...`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn] + vocab_size`.
Args:
sp_ids: A single `SparseTensor` with `values` property of type `int32`
      or `int64`, or a Python list of such `SparseTensor`s.
sp_values: A `SparseTensor` of any type.
vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
all `i`.
name: A name prefix for the returned tensors (optional)
already_sorted: A boolean to specify whether the per-batch values in
      `sp_values` are already sorted. If so, sorting is skipped. False by
      default (optional).
Returns:
A `SparseTensor` compactly representing a batch of feature ids and values,
useful for passing to functions that expect such a `SparseTensor`.
Raises:
TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
`Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
      `vocab_size` is not a list of `Tensor`s or Python ints and `sp_ids` is
      a list.
ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
"""
if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
sp_ids, sparse_tensor.SparseTensor):
sp_ids = [sp_ids]
if not (isinstance(vocab_size, ops.Tensor) or
isinstance(vocab_size, numbers.Integral)):
raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
type(vocab_size))
vocab_size = [vocab_size]
else:
if not isinstance(sp_ids, collections.Iterable):
raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
"Found %s" % type(sp_ids))
if not isinstance(vocab_size, collections.Iterable):
raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
"Found %s" % type(vocab_size))
for dim in vocab_size:
if not (isinstance(dim, ops.Tensor) or isinstance(dim, numbers.Integral)):
raise TypeError(
"vocab_size has to be a list of Tensors or Python ints. Found %s" %
type(dim))
if len(sp_ids) != len(vocab_size):
raise ValueError("sp_ids and vocab_size have to have equal lengths.")
with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
sp_values = _convert_to_sparse_tensor(sp_values)
ids = []
for sp_ids_dim in sp_ids:
ids_dim = sp_ids_dim.values
if sp_ids_dim.dtype != dtypes.int64:
ids_dim = math_ops.cast(ids_dim, dtypes.int64)
ids += [array_ops.expand_dims(ids_dim, axis=1)]
vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)
new_values = sp_values.values
new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
if already_sorted:
return result
sorted_result = sparse_reorder(result)
return sparse_tensor.SparseTensor(
sorted_result.indices, sorted_result.values, new_shape)
@tf_export("sparse.retain", v1=["sparse.retain", "sparse_retain"])
@deprecation.deprecated_endpoints("sparse_retain")
def sparse_retain(sp_input, to_retain):
"""Retains specified non-empty values within a `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
and `to_retain = [True, False, False, True]`, then the output will
be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:
[0, 1]: a
[3, 1]: d
Args:
sp_input: The input `SparseTensor` with `N` non-empty elements.
to_retain: A bool vector of length `N` with `M` true values.
Returns:
A `SparseTensor` with the same shape as the input and `M` non-empty
elements corresponding to the true positions in `to_retain`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
to_retain = ops.convert_to_tensor(to_retain)
# Shape checking, if shape is known at graph construction time
retain_shape = to_retain.get_shape()
retain_shape.assert_has_rank(1)
sp_input.values.get_shape()[0].merge_with(retain_shape[0])
where_true = array_ops.reshape(array_ops.where(to_retain), [-1])
new_indices = array_ops.gather(sp_input.indices, where_true)
new_values = array_ops.gather(sp_input.values, where_true)
return sparse_tensor.SparseTensor(new_indices, new_values,
array_ops.identity(sp_input.dense_shape))
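# Illustrative sketch (not part of the original module): keeping the first and
# last of the four non-empty values from the docstring above.
def _example_sparse_retain():
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 1], [0, 3], [2, 0], [3, 1]], values=["a", "b", "c", "d"],
      dense_shape=[4, 5])
  return sparse_retain(sp, [True, False, False, True])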
@tf_export(
"sparse.reset_shape", v1=["sparse.reset_shape", "sparse_reset_shape"])
@deprecation.deprecated_endpoints("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
"""Resets the shape of a `SparseTensor` with indices and values unchanged.
If `new_shape` is None, returns a copy of `sp_input` with its shape reset
to the tight bounding box of `sp_input`. This will be a shape consisting of
all zeros if sp_input has no values.
If `new_shape` is provided, then it must be larger or equal in all dimensions
compared to the shape of `sp_input`. When this condition is met, the returned
SparseTensor will have its shape reset to `new_shape` and its indices and
  values unchanged from that of `sp_input`.
For example:
Consider a `sp_input` with shape [2, 3, 5]:
[0, 0, 1]: a
[0, 1, 0]: b
[0, 2, 2]: c
[1, 0, 3]: d
- It is an error to set `new_shape` as [3, 7] since this represents a
rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
during graph construction (if both shapes are known) or an OpError during
run time.
- Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
equal in every dimension compared to the original shape [2, 3, 5].
- On the other hand, setting new_shape as [2, 3, 4] is also an error: The
third dimension is smaller than the original shape [2, 3, 5] (and an
`InvalidArgumentError` will be raised).
- If `new_shape` is None, the returned SparseTensor will have a shape
[2, 3, 4], which is the tight bounding box of `sp_input`.
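A minimal sketch of both modes, assuming the example tensor above (TF 1.x
endpoint name):
```python
st = tf.SparseTensor(indices=[[0, 0, 1], [0, 1, 0], [0, 2, 2], [1, 0, 3]],
                     values=[1, 2, 3, 4], dense_shape=[2, 3, 5])
tight = tf.sparse_reset_shape(st)                       # dense_shape -> [2, 3, 4]
grown = tf.sparse_reset_shape(st, new_shape=[2, 3, 6])  # dense_shape -> [2, 3, 6]
```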
Args:
sp_input: The input `SparseTensor`.
new_shape: None or a vector representing the new shape for the returned
`SparseTensor`.
Returns:
A `SparseTensor` with indices and values unchanged from `sp_input`. Its shape
is `new_shape` if that is set. Otherwise it is the tight bounding box of
`sp_input`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If `new_shape` represents a tensor with a different rank from
that of `sp_input` (if shapes are known when graph is constructed).
ValueError: If `new_shape` is determined during graph build to have
dimension sizes that are too small.
OpError:
- If `new_shape` has dimension sizes that are too small.
- If shapes are not known during graph construction time, and during run
time it is found out that the ranks do not match.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
in_indices = array_ops.identity(sp_input.indices)
in_values = array_ops.identity(sp_input.values)
in_shape = array_ops.identity(sp_input.dense_shape)
if new_shape is None:
dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
output_shape_tensor = math_ops.maximum(
array_ops.constant(0, dtype=dtypes.int64),
math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
else:
output_shape_tensor = ops.convert_to_tensor(new_shape)
output_shape_tensor.get_shape().assert_has_rank(1)
output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
# For cases when shape is known during graph construction, this catches the
# error before the sparse_tensor.SparseTensor catches it.
output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])
output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
# For cases where all shapes are known during graph construction
if (output_shape_tensor_const is not None and
sp_input.get_shape().is_fully_defined()):
in_shape_const = np.array(sp_input.get_shape().as_list())
if not np.all(in_shape_const <= output_shape_tensor_const):
raise ValueError(
"Requested new_shape should have dimension sizes >= sp_input.shape."
" Found new_shape (%s), sp_input.shape (%s)." %
(in_shape_const, output_shape_tensor_const))
output_shape_tensor = output_shape_tensor_const
else:
# For cases where shape is not known during graph construction.
output_shape_tensor = control_flow_ops.with_dependencies([
check_ops.assert_equal(
array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
], output_shape_tensor)
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_less_equal(in_shape, output_shape_tensor)],
output_shape_tensor)
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
@tf_export(
"sparse.fill_empty_rows",
v1=["sparse.fill_empty_rows", "sparse_fill_empty_rows"])
@deprecation.deprecated_endpoints("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
"""Fills empty rows in the input 2-D `SparseTensor` with a default value.
This op adds entries with the specified `default_value` at index
`[row, 0]` for any row in the input that does not already have a value.
For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
[0, 1]: a
[0, 3]: b
[1, 0]: default_value
[2, 0]: c
[3, 1]: d
[4, 0]: default_value
Note that the input may have empty columns at the end, with no effect on
this op.
The output `SparseTensor` will be in row-major order and will have the
same shape as the input.
This op also returns an indicator vector such that
empty_row_indicator[i] = True iff row i was an empty row.
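A minimal sketch of the example above (TF 1.x endpoint name; string values
are illustrative):
```python
st = tf.SparseTensor(indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
                     values=["a", "b", "c", "d"], dense_shape=[5, 6])
filled, empty_row_indicator = tf.sparse_fill_empty_rows(st, default_value="zz")
# Rows 1 and 4 each gain a "zz" entry at column 0;
# empty_row_indicator evaluates to [False, True, False, False, True].
```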
Args:
sp_input: A `SparseTensor` with shape `[N, M]`.
default_value: The value to fill for empty rows, with the same type as
`sp_input`.
name: A name prefix for the returned tensors (optional)
Returns:
sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
rows filled in with `default_value`.
empty_row_indicator: A bool vector of length `N` indicating whether each
input row was empty.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
default_value = ops.convert_to_tensor(
default_value, dtype=sp_input.values.dtype)
(output_indices, output_values, empty_row_indicator,
unused_reverse_index_map) = gen_sparse_ops.sparse_fill_empty_rows(
indices=sp_input.indices,
values=sp_input.values,
dense_shape=sp_input.dense_shape,
default_value=default_value)
return (sparse_tensor.SparseTensor(
indices=output_indices,
values=output_values,
dense_shape=sp_input.dense_shape), empty_row_indicator)
@tf_export(
"io.serialize_sparse", v1=["io.serialize_sparse", "serialize_sparse"])
@deprecation.deprecated_endpoints("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A 3-vector (1-D `Tensor`), with each column representing the serialized
`SparseTensor`'s indices, values, and shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.serialize_sparse(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
name=name,
out_type=out_type)
@tf_export(
"io.serialize_many_sparse",
v1=["io.serialize_many_sparse", "serialize_many_sparse"])
@deprecation.deprecated_endpoints("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
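A minimal sketch (TF 1.x endpoint name; the values are illustrative):
```python
batched = tf.SparseTensor(indices=[[0, 0], [0, 3], [1, 1]],
                          values=[1, 2, 3], dense_shape=[2, 5])
serialized = tf.serialize_many_sparse(batched)  # string Tensor of shape [2, 3]
```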
Args:
sp_input: The input rank `R` `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
represents serialized `SparseTensor`'s indices, values, and shape
(respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.serialize_many_sparse(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
name=name,
out_type=out_type)
def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize `SparseTensor` objects.
The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
the last dimension stores serialized `SparseTensor` objects and the other N
dimensions (N >= 0) correspond to a batch. The ranks of the original
`SparseTensor` objects must all match. When the final `SparseTensor` is
created, its rank is the rank of the incoming `SparseTensor` objects plus N;
the sparse tensors have been concatenated along new dimensions, one for each
batch.
The output `SparseTensor` object's shape values for the original dimensions
are the max across the input `SparseTensor` objects' shape values for the
corresponding dimensions. The new dimensions match the size of the batch.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `SparseReorder` to restore index ordering.
For example, if the serialized input is a `[2 x 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
serialized_sparse: The serialized `SparseTensor` objects.
The last dimension must have 3 columns.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional).
Returns:
A `SparseTensor` representing the deserialized `SparseTensor` objects.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops.deserialize_sparse(serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
@tf_export(
"io.deserialize_many_sparse",
v1=["io.deserialize_many_sparse", "deserialize_many_sparse"])
@deprecation.deprecated_endpoints("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.
The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`serialize_sparse`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse.reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
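A minimal round-trip sketch (TF 1.x endpoint names; values are illustrative):
```python
original = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                           dense_shape=[2, 50])   # minibatch of two rank-1 tensors
serialized = tf.serialize_many_sparse(original)   # string Tensor of shape [2, 3]
restored = tf.deserialize_many_sparse(serialized, dtype=tf.float32)
# `restored` has dense_shape [2, 50]; run `tf.sparse_reorder` on it if canonical
# index ordering is required downstream.
```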
Args:
serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
The serialized and packed `SparseTensor` objects.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops.deserialize_many_sparse(
serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
@tf_export("sparse.matmul", v1=["sparse.matmul", "sparse_tensor_dense_matmul"])
@deprecation.deprecated_endpoints("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
b,
adjoint_a=False,
adjoint_b=False,
name=None):
# pylint: disable=line-too-long
"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
No validity checking is performed on the indices of `A`. However, the
following input format is recommended for optimal behavior:
* If `adjoint_a == false`: `A` should be sorted in lexicographically
increasing order. Use `sparse.reorder` if you're not sure.
* If `adjoint_a == true`: `A` should be sorted in order of increasing
dimension 1 (i.e., "column major" order instead of "row major" order).
Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:
It's not obvious but you can consider `embedding_lookup_sparse` as another
sparse and dense multiplication. In some situations, you may prefer to use
`embedding_lookup_sparse` even though you're not dealing with embeddings.
There are two questions to ask in the decision process: Do you need gradients
computed as sparse too? Is your sparse data represented as two
`SparseTensor`s: ids and values? There is more explanation about data format
below. If the answer to either of these questions is yes, consider using
`tf.nn.embedding_lookup_sparse`.
Following explains differences between the expected SparseTensors:
For example if dense form of your sparse data has shape `[3, 5]` and values:
[[ a ]
[b c]
[ d ]]
`SparseTensor` format expected by `sparse_tensor_dense_matmul`:
`sp_a` (indices, values):
[0, 1]: a
[1, 0]: b
[1, 4]: c
[2, 2]: d
`SparseTensor` format expected by `embedding_lookup_sparse`:
`sp_ids` `sp_weights`
[0, 0]: 1 [0, 0]: a
[1, 0]: 0 [1, 0]: b
[1, 1]: 4 [1, 1]: c
[2, 0]: 2 [2, 0]: d
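A concrete sketch of the two encodings (the numeric stand-ins for a, b, c, d
and the `[3, 2]` dense shape of `sp_ids`/`sp_weights` are assumptions made
here for illustration only):
```python
a, b, c, d = 1., 2., 3., 4.
sp_a = tf.SparseTensor(indices=[[0, 1], [1, 0], [1, 4], [2, 2]],
                       values=[a, b, c, d], dense_shape=[3, 5])
sp_ids = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1], [2, 0]],
                         values=[1, 0, 4, 2], dense_shape=[3, 2])
sp_weights = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1], [2, 0]],
                             values=[a, b, c, d], dense_shape=[3, 2])
```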
Deciding when to use `sparse_tensor_dense_matmul` vs.
`matmul`(a_is_sparse=True):
There are a number of questions to ask in the decision process, including:
* Will the SparseTensor `A` fit in memory if densified?
* Is the column count of the product large (>> 1)?
* Is the density of `A` larger than approximately 15%?
If the answer to several of these questions is yes, consider
converting the `SparseTensor` to a dense one and using `tf.matmul` with
`a_is_sparse=True`.
This operation tends to perform well when `A` is very sparse, when the column
size of the product is small (e.g. matrix-vector multiplication), and when
`sp_a.dense_shape` takes on large values.
Below is a rough speed comparison between `sparse_tensor_dense_matmul`,
labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'. For
purposes of the comparison, the time spent converting from a `SparseTensor` to
a dense `Tensor` is not included, so it is overly conservative with respect to
the time ratio.
Benchmark system:
CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
GPU: NVidia Tesla k40c
Compiled with:
`-c opt --config=cuda --copt=-mavx`
```
tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
A sparse [m, k] with % nonzero values between 1% and 80%
B dense [k, n]
% nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense)
0.01 1 True 100 100 0.000221166 0.00010154 0.459112
0.01 1 True 100 1000 0.00033858 0.000109275 0.322745
0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385
0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669
0.01 1 False 100 100 0.000208085 0.000107603 0.51711
0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762
0.01 1 False 1000 100 0.000308222 0.00010345 0.335635
0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124
0.01 10 True 100 100 0.000218522 0.000105537 0.482958
0.01 10 True 100 1000 0.000340882 0.000111641 0.327506
0.01 10 True 1000 100 0.000315472 0.000117376 0.372064
0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128
0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354
0.01 10 False 100 1000 0.000330552 0.000112615 0.340687
0.01 10 False 1000 100 0.000341277 0.000114097 0.334324
0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549
0.01 25 True 100 100 0.000207806 0.000105977 0.509981
0.01 25 True 100 1000 0.000322879 0.00012921 0.400181
0.01 25 True 1000 100 0.00038262 0.00014158 0.370035
0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504
0.01 25 False 100 100 0.000209401 0.000104696 0.499979
0.01 25 False 100 1000 0.000321161 0.000130737 0.407076
0.01 25 False 1000 100 0.000377012 0.000136801 0.362856
0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413
0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833
0.2 1 True 100 1000 0.000348674 0.000147475 0.422959
0.2 1 True 1000 100 0.000336908 0.00010122 0.300439
0.2 1 True 1000 1000 0.001022 0.000203274 0.198898
0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746
0.2 1 False 100 1000 0.000356127 0.000146824 0.41228
0.2 1 False 1000 100 0.000322664 0.000100918 0.312764
0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648
0.2 10 True 100 100 0.000211692 0.000109903 0.519165
0.2 10 True 100 1000 0.000372819 0.000164321 0.440753
0.2 10 True 1000 100 0.000338651 0.000144806 0.427596
0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064
0.2 10 False 100 100 0.000215727 0.000110502 0.512231
0.2 10 False 100 1000 0.000375419 0.0001613 0.429653
0.2 10 False 1000 100 0.000336999 0.000145628 0.432132
0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618
0.2 25 True 100 100 0.000218705 0.000129913 0.594009
0.2 25 True 100 1000 0.000394794 0.00029428 0.745402
0.2 25 True 1000 100 0.000404483 0.0002693 0.665788
0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052
0.2 25 False 100 100 0.000221494 0.0001306 0.589632
0.2 25 False 100 1000 0.000396436 0.000297204 0.74969
0.2 25 False 1000 100 0.000409346 0.000270068 0.659754
0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046
0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836
0.5 1 True 100 1000 0.000415328 0.000223073 0.537101
0.5 1 True 1000 100 0.000358324 0.00011269 0.314492
0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851
0.5 1 False 100 100 0.000224196 0.000101423 0.452386
0.5 1 False 100 1000 0.000400987 0.000223286 0.556841
0.5 1 False 1000 100 0.000368825 0.00011224 0.304318
0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563
0.5 10 True 100 100 0.000222125 0.000112308 0.505608
0.5 10 True 100 1000 0.000461088 0.00032357 0.701753
0.5 10 True 1000 100 0.000394624 0.000225497 0.571422
0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801
0.5 10 False 100 100 0.000232083 0.000114978 0.495418
0.5 10 False 100 1000 0.000454574 0.000324632 0.714146
0.5 10 False 1000 100 0.000379097 0.000227768 0.600817
0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638
0.5 25 True 100 100 0.00023429 0.000151703 0.647501
0.5 25 True 100 1000 0.000497462 0.000598873 1.20386
0.5 25 True 1000 100 0.000460778 0.000557038 1.20891
0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845
0.5 25 False 100 100 0.000228981 0.000155334 0.678371
0.5 25 False 100 1000 0.000496139 0.000620789 1.25124
0.5 25 False 1000 100 0.00045473 0.000551528 1.21287
0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927
0.8 1 True 100 100 0.000222037 0.000105301 0.47425
0.8 1 True 100 1000 0.000410804 0.000329327 0.801664
0.8 1 True 1000 100 0.000349735 0.000131225 0.375212
0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633
0.8 1 False 100 100 0.000214079 0.000107486 0.502085
0.8 1 False 100 1000 0.000413746 0.000323244 0.781261
0.8 1 False 1000 100 0.000348983 0.000131983 0.378193
0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282
0.8 10 True 100 100 0.000229159 0.00011825 0.516017
0.8 10 True 100 1000 0.000498845 0.000532618 1.0677
0.8 10 True 1000 100 0.000383126 0.00029935 0.781336
0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689
0.8 10 False 100 100 0.000230783 0.000124958 0.541452
0.8 10 False 100 1000 0.000493393 0.000550654 1.11606
0.8 10 False 1000 100 0.000377167 0.000298581 0.791642
0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024
0.8 25 True 100 100 0.000233496 0.000175241 0.75051
0.8 25 True 100 1000 0.00055654 0.00102658 1.84458
0.8 25 True 1000 100 0.000463814 0.000783267 1.68875
0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132
0.8 25 False 100 100 0.000240243 0.000175047 0.728625
0.8 25 False 100 1000 0.000578102 0.00104499 1.80763
0.8 25 False 1000 100 0.000485113 0.000776849 1.60138
0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992
```
Args:
sp_a: SparseTensor A, of rank 2.
b: A dense Matrix with the same dtype as sp_a.
adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,
this is transpose(conj(A)). Otherwise it's transpose(A).
adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,
this is transpose(conj(B)). Otherwise it's transpose(B).
name: A name prefix for the returned tensors (optional)
Returns:
A dense matrix (pseudo-code in dense np.matrix notation):
`A = A.H if adjoint_a else A`
`B = B.H if adjoint_b else B`
`return A*B`
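A minimal usage sketch (TF 1.x endpoint name; shapes chosen for illustration):
```python
sp_a = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                       dense_shape=[2, 3])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # shape [3, 2]
c = tf.sparse_tensor_dense_matmul(sp_a, b)             # dense Tensor of shape [2, 2]
```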
"""
# pylint: enable=line-too-long
sp_a = _convert_to_sparse_tensor(sp_a)
with ops.name_scope(name, "SparseTensorDenseMatMul",
[sp_a.indices, sp_a.values, b]) as name:
b = ops.convert_to_tensor(b, name="b")
return gen_sparse_ops.sparse_tensor_dense_mat_mul(
a_indices=sp_a.indices,
a_values=sp_a.values,
a_shape=sp_a.dense_shape,
b=b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
@tf_export("sparse.softmax", v1=["sparse.softmax", "sparse_softmax"])
@deprecation.deprecated_endpoints("sparse_softmax")
def sparse_softmax(sp_input, name=None):
"""Applies softmax to a batched N-D `SparseTensor`.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
(where `N >= 2`), and with indices sorted in the canonical lexicographic
order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each
innermost logical submatrix with shape `[B, C]`, but with the catch that *the
implicitly zero elements do not participate*. Specifically, the algorithm is
equivalent to:
(1) Applies `tf.nn.softmax()` to a densified view of each innermost
submatrix with shape `[B, C]`, along the size-C dimension;
(2) Masks out the original implicitly-zero locations;
(3) Renormalizes the remaining elements.
Hence, the `SparseTensor` result has exactly the same non-zero indices and
shape.
Example:
```python
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
shape = [2, 2, 2] # 3-D SparseTensor
values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
indices = np.vstack(np.where(values)).astype(np.int64).T
result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
# ...returning a 3-D SparseTensor, equivalent to:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
```
Args:
sp_input: N-D `SparseTensor`, where `N >= 2`.
name: optional name of the operation.
Returns:
output: N-D `SparseTensor` representing the results.
"""
with ops.name_scope(name, "SparseSoftmax",
[sp_input.indices, sp_input.values]) as name:
out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
sp_input.dense_shape)
return sparse_tensor.SparseTensor(sp_input.indices, out_vals,
sp_input.dense_shape)
@tf_export("sparse.maximum", v1=["sparse.maximum", "sparse_maximum"])
@deprecation.deprecated_endpoints("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(
name, "SparseSparseMaximum",
[sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
@tf_export("sparse.minimum", v1=["sparse.minimum", "sparse_minimum"])
@deprecation.deprecated_endpoints("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(
name, "SparseSparseMinimum",
[sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
@tf_export("sparse.transpose", v1=["sparse.transpose", "sparse_transpose"])
@deprecation.deprecated_endpoints("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
"""Transposes a `SparseTensor`
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[5, 4]` and
`indices` / `values`:
[0, 2]: c
[1, 0]: a
[1, 3]: d
[3, 0]: b
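A minimal sketch of the example above (TF 1.x endpoint name; string values
are illustrative):
```python
st = tf.SparseTensor(indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
                     values=["a", "b", "c", "d"], dense_shape=[4, 5])
transposed = tf.sparse_transpose(st)  # dense_shape becomes [5, 4]
```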
Args:
sp_input: The input `SparseTensor`.
perm: A permutation of the dimensions of `sp_input`.
name: A name prefix for the returned tensors (optional)
Returns:
A transposed `SparseTensor`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
if perm is None:
rank = array_ops.rank(sp_input)
perm = (rank - 1) - math_ops.range(0, rank, 1)
indices = sp_input.indices
transposed_indices = array_ops.transpose(
array_ops.gather(array_ops.transpose(indices), perm))
perm_ = tensor_util.constant_value(ops.convert_to_tensor(perm))
if perm_ is not None and sp_input.get_shape().is_fully_defined():
old_shape_ = sp_input.get_shape().as_list()
transposed_dense_shape = list(old_shape_) # Copy.
for i, p in enumerate(perm_):
transposed_dense_shape[i] = old_shape_[p]
else:
dense_shape = sp_input.dense_shape
transposed_dense_shape = array_ops.gather(dense_shape, perm)
transposed_st = sparse_tensor.SparseTensor(
transposed_indices, sp_input.values, transposed_dense_shape)
transposed_st = sparse_reorder(transposed_st)
return transposed_st
def _add_sparse_to_tensors_map(sp_input,
container=None,
shared_name=None,
name=None):
"""Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
Args:
sp_input: The input `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string 1-vector (1D `Tensor`), with the single element representing a
unique handle to a `SparseTensor` stored by the `SparseTensorsMap`
underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.add_sparse_to_tensors_map(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
container=container,
shared_name=shared_name,
name=name)
def _add_many_sparse_to_tensors_map(sp_input,
container=None,
shared_name=None,
name=None):
"""Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `1` column.
Each row represents a unique handle to a `SparseTensor` stored by
the `SparseTensorsMap` underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.add_many_sparse_to_tensors_map(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
container=container,
shared_name=shared_name,
name=name)
def _take_many_sparse_from_tensors_map(sparse_map_op,
sparse_handles,
rank=None,
name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse.reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError(
"sparse_map_op must be one of AddSparseToTensorsMap or "
"AddManySparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops.take_many_sparse_from_tensors_map(
sparse_handles,
dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name,
name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
|
girving/tensorflow
|
tensorflow/python/ops/sparse_ops.py
|
Python
|
apache-2.0
| 90,949
|
import net_modules.gen
import numpy as np
import tensorflow as tf
from net_modules.common import default_activation
import math
import zutils.tf_math_funcs as tmf
from model.model import Factory
from model.options import PipelineOptionDefinition
from runner.train_pipeline import Pipeline as BasePipeline
from zutils.option_struct import OptionDef
from zutils.py_utils import *
import zutils.tf_graph_utils as tgu
from runner.resumable_data_module_wrapper import Net as ResumableDataModuleWrapper
net_factory = net_modules.gen.get_net_factory
net_instance = net_modules.gen.get_net_instance
class PipelineNetDef(BasePipeline):  # base class for defining the pipeline
def __init__(self, pipeline_scope_name=None, user_options=None, *args, **kwargs):
# initialize base pipeline object
super().__init__(*args, **kwargs)
# get pipeline options
if user_options is None:
user_options = dict()
self.option_def = OptionDef(user_options, PipelineOptionDefinition)
opt_struct = self.option_def["all"]
self.set_options(opt_struct)
# set pipeline name
if pipeline_scope_name is None:
pipeline_scope_name = "vae"
# remark: historically used "vae"; keep it for compatibility with the pretrained models
self.scope_name = pipeline_scope_name
# init all models ------------------------------------------------------------------
with self.graph.as_default():
# get model factory (do it first, otherwise cannot create others)
self.net_factory = Factory(self.scope_name, self.opt)
the_default_nonlinearity = self.net_factory.get_non_linearity()
with self.graph.as_default(), default_activation(the_default_nonlinearity):
self.train = NetDefTrain(self)
self.init_logger_saver()
# build posterior param
self.posterior = NetDefPosterior(self)
print("== Pipeline initialization is done")
def load_data_module(self, subset_name, mod2op_func, extra_fields=None, extra_options=None):
if extra_options is None:
extra_options = dict()
batch_size = None
if isinstance(mod2op_func, (int, float)):
batch_size = copy(mod2op_func)
def standard_mod2op_func(dm, df):
return self.data_module_to_tensor(dm, df, batch_size)
mod2op_func = standard_mod2op_func
if extra_fields is None:
extra_fields = []
elif isinstance(extra_fields, (tuple, set)):
extra_fields = list(extra_fields)
elif isinstance(extra_fields, str):
extra_fields = [extra_fields]
raw_data_module = self.net_factory.data_net_module(subset_name, extra_options=extra_options)
data_fields = ["data"]
data_fields.extend(self.opt.condition_list)
for k in extra_fields:
if k not in data_fields and k in raw_data_module.output_keys():
data_fields.append(k)
# ------------------
i = 0
data_key2ind = dict()
for v in raw_data_module.output_keys():
data_key2ind[v] = i
i += 1
# ------------------
is_train = False
if "is_train" in extra_options:
is_train = extra_options["is_train"]
del extra_options["is_train"]
data_list = mod2op_func(raw_data_module, data_fields)
if not isinstance(data_list, (list, tuple)):
data_list = [data_list]
condition_list = []
batch_size_from_data = tmf.get_shape(data_list[0])[0]
if batch_size is None:
batch_size = batch_size_from_data
else:
assert batch_size == batch_size_from_data, \
"inconsistency between data batch_size and specifiy batch_size"
if not is_train and self.opt.rotate_batch_samples:
raw_data_module = ResumableDataModuleWrapper(
raw_data_module, _num_sample_factors=batch_size
)
data_list = mod2op_func(raw_data_module, data_fields)
data_list = tgu.batch_rotating_data_buffer(
data_list, batch_size=batch_size, capacity=batch_size*2, enqueue_many=True
)
data_fields.append("boolmask_for_dominating_sample_in_batch_rotating")
condition_list.append("boolmask_for_dominating_sample_in_batch_rotating")
if not isinstance(data_list, (list, tuple)):
data_list = [data_list]
data = OrderedDict()
for k, v in zip(data_fields, data_list):
data[k] = v
# prepare condition dict
condition_list.extend(self.opt.condition_list)
cond_info_dict = dict()
for cond_name in condition_list:
cond_prefix, cond_type, cond_postfix = tgu.name_parts(cond_name)
assert cond_name in data, "no such data field for condition"
cond_data = data[cond_name]
cond_dict = dict()
if cond_type == "class":
# class condition
cond_dict["indexes"] = cond_data
cond_dict["num"] = raw_data_module.output_ranges()[data_key2ind["class"]]
elif cond_type == "landmark": # y, x
# landmark condition
cond_dict["size"] = tmf.get_shape(data["data"])[1:3]
# convert to normalized coordinate
if tmf.get_shape(cond_data)[2] > 2:
cond_dict["gate"] = cond_data[:, :, 0]
cond_data = cond_data[1:]
else:
cond_dict["gate"] = tf.ones_like(cond_data[:, :, 0])
cond_dict["location"] = \
(tf.to_float(cond_data)+0.5) / math.sqrt(cond_dict["size"][0]*cond_dict["size"][1])
assert self.opt.image_crop_size is None, "landmark condition does not work with image_crop_size"
elif cond_type == "optical_flow": # y, x
h, w = tmf.get_shape(cond_data)[1:3]
cond_data = tf.to_float(cond_data)
cond_dict["flow"] = cond_data / math.sqrt(h*w)
flow_offset_name = os.path.join(cond_prefix, "optical_flow_offset" + cond_postfix)
cond_dict["offset"] = data[flow_offset_name]
assert self.opt.image_crop_size is None, "optical_flow condition does not work with image_crop_size"
else:
cond_dict = cond_data # just a tensor, not dict
assert rbool(cond_dict), "internal error: cond_dict is not set"
cond_info_dict[cond_name] = cond_dict
return raw_data_module, data, cond_info_dict
def prefix_data_module(self, data_module):
if self.opt.data_class_list is not None:
assert hasattr(data_module, "limit_to_classes"), \
"Data loader do not support limit_to_classes"
data_module.limit_to_classes(self.opt.data_class_list)
class NetDefTrain(BasePipeline.TrainDef):
def __init__(self, pipeline):
super().__init__(pipeline, pipeline.opt)
# prepare training data
print("* Define training model")
print(" - data module")
self.raw_data_module, self.input, self.cond_info_dict = pipeline.load_data_module(
pipeline.opt.train_subset, self.create_data_tensor,
extra_options={
"shuffle": self.train_use_shuffle(),
"is_train": True,
}
)
pipeline.prefix_data_module(self.raw_data_module)
# Parallel condition
flat_cond, wrap_cond_func = recursive_flatten_with_wrap_func(tmf.is_tf_data, self.cond_info_dict)
flat_cond_len = len(flat_cond)
# REMARK: ****************** currently, cannot learn/finetune condition embedding
# create data input
stacked_data_input = list()
stacked_data_input.append(self.input["data"])
stacked_data_input.extend(flat_cond)
# expand data input to kwargs
def train_data_kwargs(data_tensors):
tp = 0
kwa = dict()
kwa["data_tensor"] = data_tensors[tp]
tp += 1
kwa["cond_info_dict"] = wrap_cond_func(data_tensors[tp:tp+flat_cond_len])
tp += flat_cond_len
return kwa
# --- training net definition
def standard_ae_training_net(data_tensors, default_reuse=None):
loss, disp_outputs, full_collection, _ = \
pipeline.net_factory.ae_training_net(
default_reuse=default_reuse,
**train_data_kwargs(data_tensors))
dvout = OrderedDict(graph=full_collection)
return loss, disp_outputs, dvout
ae_out = self.get_train_output(stacked_data_input, standard_ae_training_net)
ae_group = self.create_group("ae")
ae_group.loss = ae_out.loss
ae_group.outputs = ae_out.display_outputs
ae_group.ps_outputs = ae_out.ps_device_outputs
ae_group.device_outputs = ae_out.device_outputs
ae_group.data_module = self.data_module
# handle extra outputs --------------------
self.init_extra_outputs_and_indicators()
self.trainer_init(scope=pipeline.scope_name + "/trainer")
class NetDefPosterior:
def __init__(self, pipeline):
print("* Define posterior param model")
self.data_module, posterior_input, self.cond_info_dict = pipeline.load_data_module(
pipeline.opt.test_subset, pipeline.opt.test_batch_size, extra_fields=["class"])
pipeline.prefix_data_module(self.data_module)
if "class" in posterior_input:
posterior_label = posterior_input["class"]
else:
posterior_label = tf.zeros([posterior_input["data"].get_shape()[0], 1], dtype=tf.int32)
self.input = posterior_input
posterior_output, posterior_aux_out = pipeline.net_factory.posterior_net(self.input["data"], cond_info_dict=self.cond_info_dict)
self.outputs = posterior_aux_out
self.outputs["posterior_param"] = posterior_output
self.outputs["class_label"] = posterior_label
|
YutingZhang/lmdis-rep
|
model/pipeline_netdef.py
|
Python
|
apache-2.0
| 10,263
|
#!/usr/bin/env python
# -*- encoding=utf-8 -*-
from crawlers.electorates.base_provincePage import *
from utils import sanitize
def Crawler(nth, election_name, electionType):
target = 'assembly'
if 1 <= nth <= 17:
raise NotImplementedError('Korean National Election Committee does not have any data about electorates in individual constituencies for the 1st~17th general elections.')
# Elector counts are not published per constituency, only per basic local government unit.
# To obtain elector counts, we ultimately have to take the counts that appear in the ballot-counting results.
elif 18 <= nth <= 20:
crawler = Constituency_ElectorCrawler_Old(int(nth), election_name, electionType, target)
elif nth == 21:
raise InvalidCrawlerError('electorates', nth, election_name, electionType, target)
#"최근선거"로 들어갈 때의 code: crawler = ElectorCrawler_Recent(int(nth), election_name, target)
else:
raise InvalidCrawlerError('electorates', nth, election_name, electionType, target)
return crawler
class Constituency_ElectorCrawler_GuOld(MultiCityCrawler_province):
# def parse_tr_xhtml(self, consti, city_name=None):
# consti = super(ElectorCrawler_GuOld, self).parse_tr_xhtml(consti, city_name)
# return consti
def __init__(self, nth, _election_name, _election_type, _target):
self.nth = nth
self.target = _target
self.elemType = 'constituency_in_province'
self.isRecent = False
self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson_GuOld.json'
self.urlParam_city_codes = dict(electionId='0000000000', electionCode=_election_name)
self.urlPath_sgg_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
self.urlParam_sgg_list = dict(electionId='0000000000', electionName=_election_name,\
requestURI='/WEB-INF/jsp/electioninfo/0000000000/vc/vccp09.jsp',\
statementId='VCCP09_#90',\
oldElectionType=1, electionType=2, electionCode=2,\
townCode=-1, sggCityCode=-1)
class Constituency_ElectorCrawler_Old(MultiCityCrawler_province):
# def parse_tr_xhtml(self, consti, city_name=None):
# consti = super(ElectorCrawler_Old, self).parse_tr_xhtml(consti, city_name)
# return consti
def __init__(self, nth, _election_name, _election_type, _target):
self.nth = nth
self.target = _target
self.elemType = 'constituency_in_province'
self.isRecent = False
self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson_Old.json'
self.urlParam_city_codes = dict(electionId='0000000000', electionCode=_election_name,\
subElectionCode=2)
self.urlPath_sgg_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
self.urlParam_sgg_list = dict(electionId='0000000000', electionName=_election_name,\
requestURI='/WEB-INF/jsp/electioninfo/0000000000/bi/bipb02.jsp',\
statementId='BIPB02_#3_2',\
oldElectionType=1, electionType=2, electionCode=2,\
searchType=3, townCode=-1, sggCityCode=-1)
self.urlParam_sgg_list['statementId'] = 'BIPB02_#3_2_1' if nth==20 else 'BIPB02_#3_2'  # Not sure why only this one is different.
# With the introduction of overseas voting, constituency elector counts and proportional-representation elector counts diverge.
# Proportional-representation elector counts follow the si/gun/gu (municipal) level.
if nth == 18: # The 18th general election (2008) predates overseas voting: constituency and proportional-representation elector counts are the same, so only constituency counts are crawled.
pass
else:
self.next_crawler = LocalDivision_ElectorCrawler_Old(nth, _election_name, _election_type, _target)
class Constituency_ElectorCrawler_Recent(MultiCityCrawler_province):
# def parse_tr_xhtml(self, consti, city_name=None):
# consti = super(ElectorCrawler_Recent, self).parse_tr_xhtml(consti, city_name)
# return consti
def __init__(self, nth, _election_name, _election_type, _target):
self.nth = nth
self.target = _target
self.election_name = _election_name
self.elemType = 'constituency_in_province'
self.isRecent = True
self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson.json'
self.urlParam_city_codes = dict(electionId=_election_name, electionCode=2)
self.urlPath_sgg_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
self.urlParam_sgg_list = dict(electionId=_election_name, \
requestURI='/WEB-INF/jsp/electioninfo/'+_election_name+'/bi/bipb02.jsp',\
statementId='BIPB02_#3_2',\
electionCode=_election_type, searchType=3, townCode=-1)
self.next_crawler = LocalDivision_ElectorCrawler_Recent(nth, _election_name, _election_type, _target)
class LocalDivision_ElectorCrawler_Old(MultiCityCrawler_province):
# def parse_tr_xhtml(self, consti, city_name=None):
# consti = super(LocalDivision_ElectorCrawler_Old, self).parse_tr_xhtml(consti, city_name)
# return consti
def __init__(self, nth, _election_name, _election_type, _target):
self.nth = nth
self.target = _target
self.elemType = 'local_division'
self.isRecent = False
self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson_Old.json'
self.urlParam_city_codes = dict(electionId='0000000000', electionCode=_election_name,\
subElectionCode=2)
self.urlPath_town_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
self.urlParam_town_list = dict(electionId='0000000000', electionName=_election_name,\
requestURI='/WEB-INF/jsp/electioninfo/0000000000/bi/bipb02.jsp',\
statementId='BIPB02_#2_1',\
oldElectionType=1, electionType=2, electionCode=2,\
searchType=2, townCode=-1, sggCityCode=-1)
self.urlParam_town_list['statementId'] = 'BIPB02_#2_1' if nth==20 else 'BIPB02_#2'  # Not sure why only this one is different.
class LocalDivision_ElectorCrawler_Recent(MultiCityCrawler_province):
# TODO: the electionCode here is 7 (proportional representation), not 2 (constituency).
# def parse_tr_xhtml(self, consti, city_name=None):
# consti = super(LocalDivision_ElectorCrawler_Recent, self).parse_tr_xhtml(consti, city_name)
# return consti
def __init__(self, nth, _election_name, _election_type, _target):
self.nth = nth
self.target = _target
self.election_name = _election_name
self.elemType = 'local_division'
self.isRecent = True
self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson.json'
self.urlParam_city_codes = dict(electionId=_election_name, electionCode=7)
self.urlPath_town_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
self.urlParam_town_list = dict(electionId=_election_name,\
requestURI='/WEB-INF/jsp/electioninfo/'+_election_name+'/bi/bipb02.jsp',\
statementId='BIPB02_#3_7',\
electionCode=7, searchType=3, townCode=-1)
|
comjoy91/SKorean-Election_result-Crawler
|
crawlers/electorates/electorates-original/assembly-original.py
|
Python
|
apache-2.0
| 6,879
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers import forms as r_forms
class CreateForm(r_forms.CreateForm):
tenant_id = forms.ThemableChoiceField(label=_("Project"))
# Other fields which are not defined in field_order will be
# placed in the default order.
field_order = ['name', 'tenant_id']
failure_url = 'horizon:admin:routers:index'
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
tenant_choices = [('', _("Select a project"))]
tenants, __ = api.keystone.tenant_list(request)
for tenant in tenants:
if tenant.enabled:
tenant_choices.append((tenant.id, tenant.name))
self.fields['tenant_id'].choices = tenant_choices
class UpdateForm(r_forms.UpdateForm):
redirect_url = reverse_lazy('horizon:admin:routers:index')
|
openstack/horizon
|
openstack_dashboard/dashboards/admin/routers/forms.py
|
Python
|
apache-2.0
| 1,567
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import invalid
def test_method_1():
assert invalid.method_1() == 'method 1'
|
GoogleCloudPlatform/repo-automation-playground
|
xunit-autolabeler-v2/ast_parser/core/test_data/yaml/invalid/invalid_test.py
|
Python
|
apache-2.0
| 680
|
import random
lorem_ipsum = ('Cras sit amet nibh libero, in gravida nulla. Nulla vel metus scelerisque ante '
'sollicitudin commodo. Cras purus odio, vestibulum in vulputate at, tempus viverra '
'turpis. Fusce condimentum nunc ac nisi vulputate fringilla. Donec lacinia congue felis '
'in faucibus.')
def user(user_id):
return {
'id': user_id,
'name': 'User ' + str(user_id)
}
def project(project_id):
goal = random.randint(1, 100000)  # start at 1 so percent_funded below cannot divide by zero
funded = random.randint(0, goal)
return {
'id': project_id,
'name': "Project " + str(project_id),
'description': lorem_ipsum,
'goal': goal,
'amount_funded': funded,
'percent_funded': round((funded / goal) * 100)
}
def community(community_id):
return {
'id': community_id,
'name': "Community " + str(community_id),
'description': lorem_ipsum,
'subscribers': [user(i) for i in range(10)],
'projects': [project(i) for i in range(5)],
'comments': [comment(i) for i in range(5)]
}
def comment(user_id):
return {'user_id': user_id,
'time': 'August 25, 2014 at 9:30 PM',
'body': lorem_ipsum}
|
dylanseago/CommunityFund
|
server/communityfund/placeholder.py
|
Python
|
apache-2.0
| 1,237
|
# ----------------------------------------------------------
# Introdução a Programação de Computadores (Introduction to Computer Programming) - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
#
# Carlos Eduardo Tapudima de Oliveira 1715310030
# Frederico Victor Alfaia Rodrigues 1515200030
# Joelson Pereira Lima 1715310060
# Lucas Gabriel Silveira Duarte 1715310053
# Reinaldo da Silva Vargas 1715310054
# Walter Nobre da Silva Conceição 1715310057
#
# 14 - Write a program that reads a person's year of birth and the
# current year, then computes and displays:
# a) the person's age in years;
# b) the person's age in months;
# c) the person's age in days;
# d) the person's age in weeks;
# ----------------------------------------------------------
print('---------------------Age Calculation---------------------')
try:
running_year = input('Enter the current year: ')
running_year = int(running_year)
birth_year = input('Enter the year of birth: ')
birth_year = int(birth_year)
age = running_year - birth_year
age_in_months = age * 12
age_in_days = age * 365
age_in_weeks = age * 48
print(' The subtraction ', running_year, ' - ', birth_year, ' equals: ', age, " which is the person's age in years")
print(' The multiplication ', age, ' * ', 12, ' equals: ', age_in_months, " which is the person's age in months")
print(' The multiplication ', age, ' * ', 365, ' equals: ', age_in_days, " which is the person's age in days")
print(' The multiplication ', age, ' * ', 48, ' equals: ', age_in_weeks, " which is the person's age in weeks")
except ValueError:
print(' Only numbers are accepted, please try again!')
|
jucimarjr/IPC_2017-1
|
lista1.5/lista1.5_questao14.py
|
Python
|
apache-2.0
| 1,767
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploads apk to rollout track with user fraction."""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
TRACK = 'rollout'
USER_FRACTION = 0.2
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('package_name',
help='The package name. Example: com.android.sample')
argparser.add_argument('apk_file',
nargs='?',
default='test.apk',
help='The path to the APK file to upload.')
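# Example invocation (hypothetical package name and APK path; the standard
# oauth2client sample flags can be appended as well):
#
#   python upload_apks_rollout.py com.example.app path/to/app.apk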
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv,
'androidpublisher',
'v3',
__doc__,
__file__, parents=[argparser],
scope='https://www.googleapis.com/auth/androidpublisher')
# Process flags and read their values.
package_name = flags.package_name
apk_file = flags.apk_file
try:
edit_request = service.edits().insert(body={}, packageName=package_name)
result = edit_request.execute()
edit_id = result['id']
apk_response = service.edits().apks().upload(
editId=edit_id, packageName=package_name, media_body=apk_file).execute()
print 'Version code %d has been uploaded' % apk_response['versionCode']
track_response = service.edits().tracks().update(
editId=edit_id,
track=TRACK,
packageName=package_name,
body={u'releases': [{
u'name': u'My first API release',
u'versionCodes': [str(apk_response['versionCode'])],
u'userFraction': USER_FRACTION,
u'status': u'inProgress',
}]}).execute()
print 'Track %s is set with releases: %s' % (
track_response['track'], str(track_response['releases']))
commit_request = service.edits().commit(
editId=edit_id, packageName=package_name).execute()
print 'Edit "%s" has been committed' % (commit_request['id'])
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
googlesamples/android-play-publisher-api
|
v3/python/upload_apks_rollout.py
|
Python
|
apache-2.0
| 2,789
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ConfigParser
import os
import subprocess
import unittest
from collections import namedtuple
from contextlib import contextmanager
from operator import eq, ne
from colors import strip_color
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.fs.archive import ZIP
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open
from pants_test.testutils.file_test_util import check_symlinks, contains_exact_files
PantsResult = namedtuple(
'PantsResult',
['command', 'returncode', 'stdout_data', 'stderr_data', 'workdir'])
def ensure_cached(expected_num_artifacts=None):
"""Decorator for asserting cache writes in an integration test.
:param expected_num_artifacts: Expected number of artifacts to be in the task's
cache after running the test. If unspecified, will
assert that the number of artifacts in the cache is
non-zero.
"""
def decorator(test_fn):
def wrapper(self, *args, **kwargs):
with temporary_dir() as artifact_cache:
cache_args = '--cache-write-to=["{}"]'.format(artifact_cache)
test_fn(self, *args + (cache_args,), **kwargs)
num_artifacts = 0
for (root, _, files) in os.walk(artifact_cache):
print(root, files)
num_artifacts += len(files)
if expected_num_artifacts is None:
self.assertNotEqual(num_artifacts, 0)
else:
self.assertEqual(num_artifacts, expected_num_artifacts)
return wrapper
return decorator
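# Usage sketch (hypothetical test and target spec): the decorator appends the
# generated `--cache-write-to=[...]` flag as an extra positional argument,
# which the test should forward to its pants invocation, e.g.:
#
#   @ensure_cached(expected_num_artifacts=1)
#   def test_compile_is_cached(self, cache_args):
#     pants_run = self.run_pants(['compile', 'examples/src/java/::', cache_args])
#     self.assertEqual(pants_run.returncode, self.PANTS_SUCCESS_CODE)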
class PantsRunIntegrationTest(unittest.TestCase):
"""A base class useful for integration tests for targets in the same repo."""
PANTS_SUCCESS_CODE = 0
PANTS_SCRIPT_NAME = 'pants'
@classmethod
def has_python_version(cls, version):
"""Returns true if the current system has the specified version of python.
:param version: A python version string, such as 2.6, 3.
"""
try:
subprocess.call(['python%s' % version, '-V'])
return True
except OSError:
return False
def temporary_workdir(self, cleanup=True):
# We can hard-code '.pants.d' here because we know that will always be its value
# in the pantsbuild/pants repo (e.g., that's what we .gitignore in that repo).
# Grabbing the pants_workdir config would require this pants's config object,
# which we don't have a reference to here.
root = os.path.join(get_buildroot(), '.pants.d', 'tmp')
safe_mkdir(root)
return temporary_dir(root_dir=root, cleanup=cleanup, suffix='.pants.d')
def temporary_cachedir(self):
return temporary_dir(suffix='__CACHEDIR')
def temporary_sourcedir(self):
return temporary_dir(root_dir=get_buildroot())
@contextmanager
def source_clone(self, source_dir):
with self.temporary_sourcedir() as clone_dir:
target_spec_dir = os.path.relpath(clone_dir)
for dir_path, dir_names, file_names in os.walk(source_dir):
clone_dir_path = os.path.join(clone_dir, os.path.relpath(dir_path, source_dir))
for dir_name in dir_names:
os.mkdir(os.path.join(clone_dir_path, dir_name))
for file_name in file_names:
with open(os.path.join(dir_path, file_name), 'r') as f:
content = f.read()
if BuildFile._is_buildfile_name(file_name):
content = content.replace(source_dir, target_spec_dir)
with open(os.path.join(clone_dir_path, file_name), 'w') as f:
f.write(content)
yield clone_dir
def run_pants_with_workdir(self, command, workdir, config=None, stdin_data=None, extra_env=None,
**kwargs):
args = ['--no-pantsrc',
'--pants-workdir=' + workdir,
'--kill-nailguns',
'--print-exception-stacktrace']
if config:
config_data = config.copy()
ini = ConfigParser.ConfigParser(defaults=config_data.pop('DEFAULT', None))
for section, section_config in config_data.items():
ini.add_section(section)
for key, value in section_config.items():
ini.set(section, key, value)
ini_file_name = os.path.join(workdir, 'pants.ini')
with safe_open(ini_file_name, mode='w') as fp:
ini.write(fp)
args.append('--config-override=' + ini_file_name)
pants_script = os.path.join(get_buildroot(), self.PANTS_SCRIPT_NAME)
pants_command = [pants_script] + args + command
env = os.environ.copy()
env.update(extra_env or {})
proc = subprocess.Popen(pants_command, env=env, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
(stdout_data, stderr_data) = proc.communicate(stdin_data)
return PantsResult(pants_command, proc.returncode, stdout_data.decode("utf-8"),
stderr_data.decode("utf-8"), workdir)
def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
"""Runs pants in a subprocess.
:param list command: A list of command line arguments coming after `./pants`.
:param config: Optional data for a generated ini file. A map of <section-name> ->
map of key -> value. If order in the ini file matters, this should be an OrderedDict.
:param kwargs: Extra keyword args to pass to `subprocess.Popen`.
    :returns: a PantsResult namedtuple of (command, returncode, stdout_data, stderr_data, workdir).
"""
with self.temporary_workdir() as workdir:
return self.run_pants_with_workdir(command, workdir, config, stdin_data, extra_env, **kwargs)
@contextmanager
def pants_results(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
"""Similar to run_pants in that it runs pants in a subprocess, but yields in order to give
callers a chance to do any necessary validations on the workdir.
:param list command: A list of command line arguments coming after `./pants`.
:param config: Optional data for a generated ini file. A map of <section-name> ->
map of key -> value. If order in the ini file matters, this should be an OrderedDict.
:param kwargs: Extra keyword args to pass to `subprocess.Popen`.
    :yields: a PantsResult namedtuple of (command, returncode, stdout_data, stderr_data, workdir).
"""
with self.temporary_workdir() as workdir:
yield self.run_pants_with_workdir(command, workdir, config, stdin_data, extra_env, **kwargs)
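  # A minimal usage sketch (hypothetical goal and target) of validating the live workdir
  # that pants_results yields before it is cleaned up:
  #
  #   with self.pants_results(['compile', 'examples/src/java/org/example/hello']) as pants_run:
  #     self.assert_success(pants_run)
  #     self.assertTrue(os.path.isdir(os.path.join(pants_run.workdir, 'compile')))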
def bundle_and_run(self, target, bundle_name, bundle_jar_name=None, bundle_options=None,
args=None,
expected_bundle_jar_content=None,
expected_bundle_content=None,
library_jars_are_symlinks=True):
"""Creates the bundle with pants, then does java -jar {bundle_name}.jar to execute the bundle.
:param target: target name to compile
:param bundle_name: resulting bundle filename (minus .zip extension)
:param bundle_jar_name: monolithic jar filename (minus .jar extension), if None will be the
same as bundle_name
:param bundle_options: additional options for bundle
:param args: optional arguments to pass to executable
:param expected_bundle_content: verify the bundle zip content
:param expected_bundle_jar_content: verify the bundle jar content
    :param library_jars_are_symlinks: verify that library jars are symlinks if True, and
      actual files if False. Defaults to `True` because we always create symlinks for both
      external and internal dependencies; the only exception is when shading is used.
:return: stdout as a string on success, raises an Exception on error
"""
bundle_jar_name = bundle_jar_name or bundle_name
bundle_options = bundle_options or []
bundle_options = ['bundle.jvm'] + bundle_options + ['--archive=zip', target]
pants_run = self.run_pants(bundle_options)
self.assert_success(pants_run)
self.assertTrue(check_symlinks('dist/{bundle_name}-bundle/libs'.format(bundle_name=bundle_name),
library_jars_are_symlinks))
# TODO(John Sirois): We need a zip here to suck in external library classpath elements
# pointed to by symlinks in the run_pants ephemeral tmpdir. Switch run_pants to be a
# contextmanager that yields its results while the tmpdir workdir is still active and change
# this test back to using an un-archived bundle.
with temporary_dir() as workdir:
ZIP.extract('dist/{bundle_name}.zip'.format(bundle_name=bundle_name), workdir)
if expected_bundle_content:
self.assertTrue(contains_exact_files(workdir, expected_bundle_content))
if expected_bundle_jar_content:
with temporary_dir() as check_bundle_jar_dir:
bundle_jar = os.path.join(workdir, '{bundle_jar_name}.jar'
.format(bundle_jar_name=bundle_jar_name))
ZIP.extract(bundle_jar, check_bundle_jar_dir)
self.assertTrue(contains_exact_files(check_bundle_jar_dir, expected_bundle_jar_content))
optional_args = []
if args:
optional_args = args
java_run = subprocess.Popen(['java',
'-jar',
'{bundle_jar_name}.jar'.format(bundle_jar_name=bundle_jar_name)]
+ optional_args,
stdout=subprocess.PIPE,
cwd=workdir)
stdout, _ = java_run.communicate()
java_returncode = java_run.returncode
      self.assertEqual(java_returncode, 0)
return stdout
def assert_success(self, pants_run, msg=None):
self.assert_result(pants_run, self.PANTS_SUCCESS_CODE, expected=True, msg=msg)
def assert_failure(self, pants_run, msg=None):
self.assert_result(pants_run, self.PANTS_SUCCESS_CODE, expected=False, msg=msg)
def assert_result(self, pants_run, value, expected=True, msg=None):
check, assertion = (eq, self.assertEqual) if expected else (ne, self.assertNotEqual)
if check(pants_run.returncode, value):
return
details = [msg] if msg else []
details.append(' '.join(pants_run.command))
details.append('returncode: {returncode}'.format(returncode=pants_run.returncode))
def indent(content):
return '\n\t'.join(content.splitlines())
if pants_run.stdout_data:
details.append('stdout:\n\t{stdout}'.format(stdout=indent(pants_run.stdout_data)))
if pants_run.stderr_data:
details.append('stderr:\n\t{stderr}'.format(stderr=indent(pants_run.stderr_data)))
error_msg = '\n'.join(details)
assertion(value, pants_run.returncode, error_msg)
def normalize(self, s):
"""Removes escape sequences (e.g. colored output) and all whitespace from string s."""
return ''.join(strip_color(s).split())
@contextmanager
def file_renamed(self, prefix, test_name, real_name):
real_path = os.path.join(prefix, real_name)
test_path = os.path.join(prefix, test_name)
try:
os.rename(test_path, real_path)
yield
finally:
os.rename(real_path, test_path)
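# ---------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream test harness): a minimal subclass
# showing how run_pants and the assertion helpers above are typically combined. The target
# path and class name are hypothetical; 'list' is a standard pants goal.
class ExampleListIntegrationTest(PantsRunIntegrationTest):

  def test_list_example_target(self):
    # Run `./pants list <target>` in a subprocess and check both the exit code and output.
    pants_run = self.run_pants(['list', 'examples/src/python/example/hello'])
    self.assert_success(pants_run)
    self.assertIn('hello', pants_run.stdout_data)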
|
dbentley/pants
|
tests/python/pants_test/pants_run_integration_test.py
|
Python
|
apache-2.0
| 11,466
|
"""
VirtWhoConf - File ``/etc/virt-who.conf`` and ``/etc/virt-who.d/*.conf``
========================================================================
The ``VirtWhoConf`` class parses the virt-who configuration files in `ini-like`
format.
.. note::
    The configuration files under ``/etc/virt-who.d/`` might contain
    sensitive information, such as a ``password``, which must be filtered out.
"""
from .. import parser, LegacyItemAccess, IniConfigFile, add_filter
from insights.specs import virt_who_conf
filter_list = [
'[',
'interval',
'oneshot',
'type',
'server',
'debug',
'log_',
'configs',
'owner',
'env',
]
add_filter('virt_who_conf', filter_list)
@parser(virt_who_conf)
class VirtWhoConf(LegacyItemAccess, IniConfigFile):
"""
Parse the ``virt-who`` configuration files ``/etc/virt-who.conf`` and
``/etc/virt-who.d/*.conf``.
Sample configuration file::
## This is a template for virt-who global configuration files. Please see
## virt-who-config(5) manual page for detailed information.
##
## virt-who checks /etc/virt-who.conf for sections 'global' and 'defaults'.
## The sections and their values are explained below.
## NOTE: These sections retain their special meaning and function only when present in /etc/virt-who.conf
##
## You can uncomment and fill following template or create new file with
## similar content.
#Terse version of the general config template:
[global]
interval=3600
#reporter_id=
debug=False
oneshot=False
#log_per_config=False
#log_dir=
#log_file=
#configs=
[defaults]
owner=Satellite
env=Satellite
Examples:
>>> vwho_conf = shared[VirtWhoConf]
>>> 'global' in vwho_conf
True
>>> vwho_conf.has_option('global', 'debug')
True
>>> vwho_conf.get('global', 'oneshot')
"False"
>>> vwho_conf.getboolean('global', 'oneshot')
False
>>> vwho_conf.get('global', 'interval')
"3600"
>>> vwho_conf.getint('global', 'interval')
3600
>>> vwho_conf.items('defaults')
{'owner': 'Satellite', 'env': 'Satellite'}
"""
pass
|
wcmitchell/insights-core
|
insights/parsers/virt_who_conf.py
|
Python
|
apache-2.0
| 2,357
|